author    erwincoumans <erwincoumans@google.com>  2021-11-12 05:38:24 +0000
committer GitHub <noreply@github.com>  2021-11-12 05:38:24 +0000
commit    a9a103fc8c0fe758ef38416f082307dae64a72d7
tree      1ffcf11720335cad8c40b62b5c2733383ad55da2
parent    40aeecb72eabfbaae2b91ef6a1e56037b95e7e0f
parent    a3005879bba722cce4f13e79965917cda9b72014
download  bullet3-a9a103fc8c0fe758ef38416f082307dae64a72d7.tar.gz
Merge pull request #4010 from erwincoumans/master
add missing bunny.obj to pybullet_data
-rw-r--r--  examples/ThirdPartyLibs/Eigen/CMakeLists.txt | 19
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Cholesky | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Core | 381
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Eigenvalues | 5
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Geometry | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Householder | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Jacobi | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/KLUSupport | 41
-rw-r--r--  examples/ThirdPartyLibs/Eigen/LICENSE.txt | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/LU | 7
-rw-r--r--  examples/ThirdPartyLibs/Eigen/OrderingMethods | 3
-rw-r--r--  examples/ThirdPartyLibs/Eigen/PaStiXSupport | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/QR | 5
-rw-r--r--  examples/ThirdPartyLibs/Eigen/QtAlignedMalloc | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/SVD | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/Sparse | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/SparseCholesky | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/SparseLU | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/SparseQR | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Cholesky/LDLT.h | 80
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Cholesky/LLT.h | 61
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/CholmodSupport/CholmodSupport.h | 76
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ArithmeticSequence.h | 143
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Array.h | 112
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ArrayBase.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ArrayWrapper.h | 42
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/AssignEvaluator.h | 175
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/Assign_MKL.h | 26
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/BandMatrix.h | 32
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Block.h | 124
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/BooleanRedux.h | 16
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/CommaInitializer.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ConditionEstimator.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/CoreEvaluators.h | 449
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/CwiseBinaryOp.h | 38
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/CwiseNullaryOp.h | 133
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryOp.h | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryView.h | 24
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/DenseBase.h | 170
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/DenseCoeffsBase.h | 46
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/DenseStorage.h | 224
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Diagonal.h | 39
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/DiagonalMatrix.h | 48
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Dot.h | 21
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/EigenBase.h | 19
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ForceAlignedAccess.h | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/GeneralProduct.h | 23
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/GenericPacketMath.h | 746
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/GlobalFunctions.h | 23
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/IO.h | 47
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/IndexedView.h | 40
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Inverse.h | 19
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Map.h | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/MapBase.h | 19
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/MathFunctions.h | 1016
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/MathFunctionsImpl.h | 145
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Matrix.h | 158
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/MatrixBase.h | 42
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/NestByValue.h | 73
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/NoAlias.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/NumTraits.h | 125
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/PartialReduxEvaluator.h | 237
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/PermutationMatrix.h | 34
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/PlainObjectBase.h | 211
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Product.h | 59
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ProductEvaluators.h | 334
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Random.h | 36
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Redux.h | 338
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Ref.h | 140
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Replicate.h | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Reshaped.h | 454
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/ReturnByValue.h | 10
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Reverse.h | 24
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Select.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/SelfAdjointView.h | 55
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/SelfCwiseBinaryOp.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Solve.h | 28
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/SolveTriangular.h | 33
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/SolverBase.h | 44
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/StableNorm.h | 170
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/StlIterators.h | 463
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Stride.h | 17
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Swap.h | 9
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Transpose.h | 109
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Transpositions.h | 75
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/TriangularMatrix.h | 153
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/VectorBlock.h | 10
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/VectorwiseOp.h | 225
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/Visitor.h | 166
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/Complex.h | 183
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/MathFunctions.h | 463
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/PacketMath.h | 1251
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/TypeCasting.h | 66
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/Complex.h | 424
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/MathFunctions.h | 448
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/PacketMath.h | 1811
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/TypeCasting.h | 89
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/Complex.h | 317
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MathFunctions.h | 270
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProduct.h | 2765
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h | 159
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h | 620
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/PacketMath.h | 2384
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Complex.h | 328
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Half.h | 666
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMath.h | 333
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMathHalf.h | 1133
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/TypeCasting.h | 212
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/BFloat16.h | 688
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/ConjHelper.h | 116
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h | 1649
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h | 110
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Half.h | 942
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Settings.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/TypeCasting.h | 120
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/MathFunctions.h (renamed from examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/MathFunctions.h) | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/PacketMath.h | 1649
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/TypeCasting.h | 79
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/HIP/hcc/math_constants.h | 23
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/Complex.h | 648
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/MathFunctions.h | 387
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/PacketMath.h | 1233
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/Complex.h | 532
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h | 183
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/MathFunctions.h | 124
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/PacketMath.h | 4628
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/TypeCasting.h | 1419
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/Complex.h | 243
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/MathFunctions.h | 493
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/PacketMath.h | 1140
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/TypeCasting.h | 93
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/MathFunctions.h | 44
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/PacketMath.h | 752
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/TypeCasting.h | 49
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/InteropHeaders.h | 232
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/MathFunctions.h | 301
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/PacketMath.h | 670
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h | 694
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/TypeCasting.h | 85
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/Complex.h | 407
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/MathFunctions.h | 106
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/PacketMath.h | 893
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/functors/AssignmentFunctors.h | 13
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/functors/BinaryFunctors.h | 183
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/functors/NullaryFunctors.h | 61
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/functors/StlFunctors.h | 36
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/functors/UnaryFunctors.h | 335
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralBlockPanelKernel.h | 1670
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix.h | 63
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h | 70
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixVector.h | 183
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/Parallelizer.h | 27
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 83
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h | 24
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixVector.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointProduct.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointRank2Update.h | 7
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix.h | 84
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h | 26
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixVector.h | 22
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix.h | 70
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverVector.h | 23
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/util/BlasUtil.h | 422
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/ConfigureVectorization.h | 512
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/Constants.h | 30
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/util/DisableStupidWarnings.h | 45
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/ForwardDeclarations.h | 37
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/IndexedViewHelper.h | 65
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/IntegralConstant.h | 18
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/util/MKL_support.h | 9
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/Macros.h | 973
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/Memory.h | 236
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/Core/util/Meta.h | 382
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/ReenableStupidWarnings.h | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/ReshapedHelper.h | 51
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/StaticAssert.h | 17
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/SymbolicIndex.h | 25
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Core/util/XprHelper.h | 87
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexEigenSolver.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexSchur.h | 11
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/EigenSolver.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h | 5
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/HessenbergDecomposition.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealQZ.h | 15
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealSchur.h | 34
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 120
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h | 23
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Eigenvalues/Tridiagonalization.h | 38
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/AlignedBox.h | 102
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/AngleAxis.h | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/EulerAngles.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Homogeneous.h | 26
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Hyperplane.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/OrthoMethods.h | 5
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/ParametrizedLine.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Quaternion.h | 84
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Rotation2D.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Scaling.h | 28
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Transform.h | 141
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Translation.h | 18
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/Umeyama.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SIMD.h | 168
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SSE.h | 161
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Householder/BlockHouseholder.h | 11
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Householder/Householder.h | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Householder/HouseholderSequence.h | 171
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h | 32
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 30
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h | 30
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h | 88
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 111
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h | 90
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h | 22
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h | 20
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/Jacobi/Jacobi.h | 35
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/KLUSupport/KLUSupport.h | 358
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/LU/Determinant.h | 54
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/LU/FullPivLU.h | 70
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/LU/InverseImpl.h | 35
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/LU/PartialPivLU.h | 129
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/LU/arch/InverseSize4.h | 351
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/LU/arch/Inverse_SSE.h | 338
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Amd.h | 24
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Eigen_Colamd.h | 574
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Ordering.h | 16
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/PaStiXSupport/PaStiXSupport.h | 10
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/PardisoSupport/PardisoSupport.h | 16
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/QR/ColPivHouseholderQR.h | 60
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/QR/CompleteOrthogonalDecomposition.h | 125
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/QR/FullPivHouseholderQR.h | 72
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/QR/HouseholderQR.h | 62
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 28
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SVD/BDCSVD.h | 291
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD.h | 36
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD_LAPACKE.h | 5
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SVD/SVDBase.h | 106
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SVD/UpperBidiagonalization.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky.h | 26
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h | 49
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/AmbiVector.h | 5
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/CompressedStorage.h | 16
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h | 12
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseAssign.h | 104
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseBlock.h | 82
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCompressedBase.h | 35
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseUnaryOp.h | 6
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseDenseProduct.h | 38
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrix.h | 143
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrixBase.h | 17
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseProduct.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseRef.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseSelfAdjointView.h | 15
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseUtil.h | 8
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseVector.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseView.h | 1
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU.h | 176
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_Memory.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h | 78
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_column_dfs.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_gemm_kernel.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_panel_bmod.h | 2
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SparseQR/SparseQR.h | 53
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/StlSupport/StdDeque.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/StlSupport/StdList.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/StlSupport/StdVector.h | 4
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/SuperLUSupport/SuperLUSupport.h | 18
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/UmfPackSupport/UmfPackSupport.h | 208
-rwxr-xr-x[-rw-r--r--]  examples/ThirdPartyLibs/Eigen/src/misc/lapacke.h | 0
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseBinaryOps.h | 120
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseUnaryOps.h | 149
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/BlockMethods.h | 404
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/CommonCwiseUnaryOps.h | 14
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/IndexedViewMethods.h | 17
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseBinaryOps.h | 56
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseUnaryOps.h | 10
-rw-r--r--  examples/ThirdPartyLibs/Eigen/src/plugins/ReshapedMethods.h | 149
-rw-r--r--  examples/TwoJoint/CMakeLists.txt | 2
-rw-r--r--  examples/TwoJoint/TwoJointMain.cpp | 7
-rw-r--r--  examples/pybullet/gym/pybullet_data/bunny.obj | 1360
285 files changed, 44148 insertions, 12944 deletions
diff --git a/examples/ThirdPartyLibs/Eigen/CMakeLists.txt b/examples/ThirdPartyLibs/Eigen/CMakeLists.txt
deleted file mode 100644
index 9eb502b79..000000000
--- a/examples/ThirdPartyLibs/Eigen/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-include(RegexUtils)
-test_escape_string_as_regex()
-
-file(GLOB Eigen_directory_files "*")
-
-escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
-
-foreach(f ${Eigen_directory_files})
- if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src")
- list(APPEND Eigen_directory_files_to_install ${f})
- endif()
-endforeach(f ${Eigen_directory_files})
-
-install(FILES
- ${Eigen_directory_files_to_install}
- DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel
- )
-
-install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel FILES_MATCHING PATTERN "*.h")
diff --git a/examples/ThirdPartyLibs/Eigen/Cholesky b/examples/ThirdPartyLibs/Eigen/Cholesky
index 1332b540d..a318ceb79 100644
--- a/examples/ThirdPartyLibs/Eigen/Cholesky
+++ b/examples/ThirdPartyLibs/Eigen/Cholesky
@@ -43,4 +43,3 @@
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_CHOLESKY_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
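
A minimal sketch of exercising the Cholesky module this file exposes (illustrative only; assumes nothing beyond the vendored Eigen headers being on the include path):

    // Solve A x = b for a symmetric positive-definite A via the Cholesky module.
    #include <Eigen/Cholesky>  // LLT and LDLT; pulls in Eigen/Core
    #include <iostream>

    int main() {
      Eigen::Matrix3d A;
      A << 4, 1, 0,
           1, 3, 1,
           0, 1, 2;                              // symmetric positive definite
      Eigen::Vector3d b(1, 2, 3);
      Eigen::Vector3d x = A.llt().solve(b);      // classic Cholesky (L L^T)
      std::cout << "x = " << x.transpose() << "\n";
      return 0;
    }
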
diff --git a/examples/ThirdPartyLibs/Eigen/Core b/examples/ThirdPartyLibs/Eigen/Core
index c66359b79..3c03519fe 100644
--- a/examples/ThirdPartyLibs/Eigen/Core
+++ b/examples/ThirdPartyLibs/Eigen/Core
@@ -11,260 +11,55 @@
#ifndef EIGEN_CORE_H
#define EIGEN_CORE_H
-// first thing Eigen does: stop the compiler from committing suicide
+// first thing Eigen does: stop the compiler from reporting useless warnings.
#include "src/Core/util/DisableStupidWarnings.h"
-#if defined(__CUDACC__) && !defined(EIGEN_NO_CUDA)
- #define EIGEN_CUDACC __CUDACC__
-#endif
-
-#if defined(__CUDA_ARCH__) && !defined(EIGEN_NO_CUDA)
- #define EIGEN_CUDA_ARCH __CUDA_ARCH__
-#endif
-
-// Starting with CUDA 9 the composite __CUDACC_VER__ is not available.
-#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
-#define EIGEN_CUDACC_VER ((__CUDACC_VER_MAJOR__ * 10000) + (__CUDACC_VER_MINOR__ * 100))
-#elif defined(__CUDACC_VER__)
-#define EIGEN_CUDACC_VER __CUDACC_VER__
-#else
-#define EIGEN_CUDACC_VER 0
-#endif
-
-// Handle NVCC/CUDA/SYCL
-#if defined(EIGEN_CUDACC) || defined(__SYCL_DEVICE_ONLY__)
- // Do not try asserts on CUDA and SYCL!
- #ifndef EIGEN_NO_DEBUG
- #define EIGEN_NO_DEBUG
- #endif
-
- #ifdef EIGEN_INTERNAL_DEBUGGING
- #undef EIGEN_INTERNAL_DEBUGGING
- #endif
-
- #ifdef EIGEN_EXCEPTIONS
- #undef EIGEN_EXCEPTIONS
- #endif
+// then include this file where all our macros are defined. It's really important to do it first because
+// it's where we do all the compiler/OS/arch detections and define most defaults.
+#include "src/Core/util/Macros.h"
- // All functions callable from CUDA code must be qualified with __device__
- #ifdef EIGEN_CUDACC
- // Do not try to vectorize on CUDA and SYCL!
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-
- #define EIGEN_DEVICE_FUNC __host__ __device__
- // We need math_functions.hpp to ensure that that EIGEN_USING_STD_MATH macro
- // works properly on the device side
- #include <math_functions.hpp>
- #else
- #define EIGEN_DEVICE_FUNC
- #endif
-#else
- #define EIGEN_DEVICE_FUNC
-#endif
+// This detects SSE/AVX/NEON/etc. and configures alignment settings
+#include "src/Core/util/ConfigureVectorization.h"
-#ifdef __NVCC__
-#define EIGEN_DONT_VECTORIZE
+// We need cuda_runtime.h/hip_runtime.h to ensure that
+// the EIGEN_USING_STD macro works properly on the device side
+#if defined(EIGEN_CUDACC)
+ #include <cuda_runtime.h>
+#elif defined(EIGEN_HIPCC)
+ #include <hip/hip_runtime.h>
#endif
-// When compiling CUDA device code with NVCC, pull in math functions from the
-// global namespace. In host mode, and when device doee with clang, use the
-// std versions.
-#if defined(EIGEN_CUDA_ARCH) && defined(__NVCC__)
- #define EIGEN_USING_STD_MATH(FUNC) using ::FUNC;
-#else
- #define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;
-#endif
-
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_CUDA_ARCH) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL)
- #define EIGEN_EXCEPTIONS
-#endif
#ifdef EIGEN_EXCEPTIONS
#include <new>
#endif
-// then include this file where all our macros are defined. It's really important to do it first because
-// it's where we do all the alignment settings (platform detection and honoring the user's will if he
-// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
-#include "src/Core/util/Macros.h"
-
// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
-#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6)
+#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6) && EIGEN_GNUC_AT_MOST(5,5)
#pragma GCC optimize ("-fno-ipa-cp-clone")
#endif
+// Prevent ICC from specializing std::complex operators that silently fail
+// on device. This allows us to use our own device-compatible specializations
+// instead.
+#if defined(EIGEN_COMP_ICC) && defined(EIGEN_GPU_COMPILE_PHASE) \
+ && !defined(_OVERRIDE_COMPLEX_SPECIALIZATION_)
+#define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1
+#endif
#include <complex>
// this include file manages BLAS and MKL related macros
// and inclusion of their respective header files
#include "src/Core/util/MKL_support.h"
-// if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into
-// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks
-#if EIGEN_MAX_ALIGN_BYTES==0
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-#endif
-
-#if EIGEN_COMP_MSVC
- #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
- #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
- // Remember that usage of defined() in a #define is undefined by the standard.
- // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
- #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
- #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
- #endif
- #endif
-#else
- // Remember that usage of defined() in a #define is undefined by the standard
- #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
- #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
- #endif
-#endif
-
-#ifndef EIGEN_DONT_VECTORIZE
-
- #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
-
- // Defines symbols for compile-time detection of which instructions are
- // used.
- // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_SSE
- #define EIGEN_VECTORIZE_SSE2
-
- // Detect sse3/ssse3/sse4:
- // gcc and icc defines __SSE3__, ...
- // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
- // want to force the use of those instructions with msvc.
- #ifdef __SSE3__
- #define EIGEN_VECTORIZE_SSE3
- #endif
- #ifdef __SSSE3__
- #define EIGEN_VECTORIZE_SSSE3
- #endif
- #ifdef __SSE4_1__
- #define EIGEN_VECTORIZE_SSE4_1
- #endif
- #ifdef __SSE4_2__
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __AVX__
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __AVX2__
- #define EIGEN_VECTORIZE_AVX2
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __FMA__
- #define EIGEN_VECTORIZE_FMA
- #endif
- #if defined(__AVX512F__)
- #define EIGEN_VECTORIZE_AVX512
- #define EIGEN_VECTORIZE_AVX2
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_FMA
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #ifdef __AVX512DQ__
- #define EIGEN_VECTORIZE_AVX512DQ
- #endif
- #endif
-
- // include files
-
- // This extern "C" works around a MINGW-w64 compilation issue
- // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
- // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
- // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
- // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
- // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
- // notice that since these are C headers, the extern "C" is theoretically needed anyways.
- extern "C" {
- // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
- // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
- #if EIGEN_COMP_ICC >= 1110
- #include <immintrin.h>
- #else
- #include <mmintrin.h>
- #include <emmintrin.h>
- #include <xmmintrin.h>
- #ifdef EIGEN_VECTORIZE_SSE3
- #include <pmmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSSE3
- #include <tmmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_1
- #include <smmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_2
- #include <nmmintrin.h>
- #endif
- #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512)
- #include <immintrin.h>
- #endif
- #endif
- } // end extern "C"
- #elif defined __VSX__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_VSX
- #include <altivec.h>
- // We need to #undef all these ugly tokens defined in <altivec.h>
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #elif defined __ALTIVEC__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ALTIVEC
- #include <altivec.h>
- // We need to #undef all these ugly tokens defined in <altivec.h>
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #elif (defined __ARM_NEON) || (defined __ARM_NEON__)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_NEON
- #include <arm_neon.h>
- #elif (defined __s390x__ && defined __VEC__)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ZVECTOR
- #include <vecintrin.h>
- #endif
-#endif
-#if defined(__F16C__) && !defined(EIGEN_COMP_CLANG)
- // We can use the optimized fp16 to float and float to fp16 conversion routines
- #define EIGEN_HAS_FP16_C
+#if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
+ #define EIGEN_HAS_GPU_FP16
#endif
-#if defined EIGEN_CUDACC
- #define EIGEN_VECTORIZE_CUDA
- #include <vector_types.h>
- #if EIGEN_CUDACC_VER >= 70500
- #define EIGEN_HAS_CUDA_FP16
- #endif
-#endif
-
-#if defined EIGEN_HAS_CUDA_FP16
- #include <host_defines.h>
- #include <cuda_fp16.h>
+#if defined(EIGEN_HAS_CUDA_BF16) || defined(EIGEN_HAS_HIP_BF16)
+ #define EIGEN_HAS_GPU_BF16
#endif
#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
@@ -288,7 +83,10 @@
#include <cmath>
#include <cassert>
#include <functional>
-#include <iosfwd>
+#include <sstream>
+#ifndef EIGEN_NO_IO
+ #include <iosfwd>
+#endif
#include <cstring>
#include <string>
#include <limits>
@@ -296,6 +94,10 @@
// for min/max:
#include <algorithm>
+#if EIGEN_HAS_CXX11
+#include <array>
+#endif
+
// for std::is_nothrow_move_assignable
#ifdef EIGEN_INCLUDE_TYPE_TRAITS
#include <type_traits>
@@ -307,51 +109,30 @@
#endif
// required for __cpuid, needs to be included after cmath
-#if EIGEN_COMP_MSVC && EIGEN_ARCH_i386_OR_x86_64 && !EIGEN_OS_WINCE
+// also required for _BitScanReverse on Windows on ARM
+#if EIGEN_COMP_MSVC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM64) && !EIGEN_OS_WINCE
#include <intrin.h>
#endif
-#if defined(__SYCL_DEVICE_ONLY__)
+#if defined(EIGEN_USE_SYCL)
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
- #include <SYCL/sycl.hpp>
-#endif
-
-/** \brief Namespace containing all symbols from the %Eigen library. */
-namespace Eigen {
-
-inline static const char *SimdInstructionSetsInUse(void) {
-#if defined(EIGEN_VECTORIZE_AVX512)
- return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_AVX)
- return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_2)
- return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_1)
- return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
-#elif defined(EIGEN_VECTORIZE_SSSE3)
- return "SSE, SSE2, SSE3, SSSE3";
-#elif defined(EIGEN_VECTORIZE_SSE3)
- return "SSE, SSE2, SSE3";
-#elif defined(EIGEN_VECTORIZE_SSE2)
- return "SSE, SSE2";
-#elif defined(EIGEN_VECTORIZE_ALTIVEC)
- return "AltiVec";
-#elif defined(EIGEN_VECTORIZE_VSX)
- return "VSX";
-#elif defined(EIGEN_VECTORIZE_NEON)
- return "ARM NEON";
-#elif defined(EIGEN_VECTORIZE_ZVECTOR)
- return "S390X ZVECTOR";
-#else
- return "None";
+ #include <CL/sycl.hpp>
+ #include <map>
+ #include <memory>
+ #include <utility>
+ #include <thread>
+ #ifndef EIGEN_SYCL_LOCAL_THREAD_DIM0
+ #define EIGEN_SYCL_LOCAL_THREAD_DIM0 16
+ #endif
+ #ifndef EIGEN_SYCL_LOCAL_THREAD_DIM1
+ #define EIGEN_SYCL_LOCAL_THREAD_DIM1 16
+ #endif
#endif
-}
-} // end namespace Eigen
#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT
// This will generate an error message:
@@ -360,7 +141,7 @@ inline static const char *SimdInstructionSetsInUse(void) {
namespace Eigen {
-// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
// ensure QNX/QCC support
using std::size_t;
// gcc 4.6.0 wants std:: for ptrdiff_t
@@ -387,59 +168,87 @@ using std::ptrdiff_t;
#include "src/Core/util/IntegralConstant.h"
#include "src/Core/util/SymbolicIndex.h"
-
#include "src/Core/NumTraits.h"
#include "src/Core/MathFunctions.h"
#include "src/Core/GenericPacketMath.h"
#include "src/Core/MathFunctionsImpl.h"
#include "src/Core/arch/Default/ConjHelper.h"
+// Generic half float support
+#include "src/Core/arch/Default/Half.h"
+#include "src/Core/arch/Default/BFloat16.h"
+#include "src/Core/arch/Default/TypeCasting.h"
+#include "src/Core/arch/Default/GenericPacketMathFunctionsFwd.h"
#if defined EIGEN_VECTORIZE_AVX512
#include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
+ #include "src/Core/arch/SSE/Complex.h"
#include "src/Core/arch/AVX/PacketMath.h"
+ #include "src/Core/arch/AVX/TypeCasting.h"
+ #include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX512/PacketMath.h"
+ #include "src/Core/arch/AVX512/TypeCasting.h"
+ #include "src/Core/arch/AVX512/Complex.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/MathFunctions.h"
#include "src/Core/arch/AVX512/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_AVX
// Use AVX for floats and doubles, SSE for integers
#include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/SSE/Complex.h"
- #include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/PacketMath.h"
- #include "src/Core/arch/AVX/MathFunctions.h"
- #include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX/TypeCasting.h"
+ #include "src/Core/arch/AVX/Complex.h"
+ #include "src/Core/arch/SSE/MathFunctions.h"
+ #include "src/Core/arch/AVX/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_SSE
#include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/SSE/Complex.h"
- #include "src/Core/arch/SSE/TypeCasting.h"
#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
#include "src/Core/arch/AltiVec/PacketMath.h"
#include "src/Core/arch/AltiVec/MathFunctions.h"
#include "src/Core/arch/AltiVec/Complex.h"
#elif defined EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/PacketMath.h"
+ #include "src/Core/arch/NEON/TypeCasting.h"
#include "src/Core/arch/NEON/MathFunctions.h"
#include "src/Core/arch/NEON/Complex.h"
+#elif defined EIGEN_VECTORIZE_SVE
+ #include "src/Core/arch/SVE/PacketMath.h"
+ #include "src/Core/arch/SVE/TypeCasting.h"
+ #include "src/Core/arch/SVE/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_ZVECTOR
#include "src/Core/arch/ZVector/PacketMath.h"
#include "src/Core/arch/ZVector/MathFunctions.h"
#include "src/Core/arch/ZVector/Complex.h"
+#elif defined EIGEN_VECTORIZE_MSA
+ #include "src/Core/arch/MSA/PacketMath.h"
+ #include "src/Core/arch/MSA/MathFunctions.h"
+ #include "src/Core/arch/MSA/Complex.h"
#endif
-// Half float support
-#include "src/Core/arch/CUDA/Half.h"
-#include "src/Core/arch/CUDA/PacketMathHalf.h"
-#include "src/Core/arch/CUDA/TypeCasting.h"
+#if defined EIGEN_VECTORIZE_GPU
+ #include "src/Core/arch/GPU/PacketMath.h"
+ #include "src/Core/arch/GPU/MathFunctions.h"
+ #include "src/Core/arch/GPU/TypeCasting.h"
+#endif
-#if defined EIGEN_VECTORIZE_CUDA
- #include "src/Core/arch/CUDA/PacketMath.h"
- #include "src/Core/arch/CUDA/MathFunctions.h"
+#if defined(EIGEN_USE_SYCL)
+ #include "src/Core/arch/SYCL/SyclMemoryModel.h"
+ #include "src/Core/arch/SYCL/InteropHeaders.h"
+#if !defined(EIGEN_DONT_VECTORIZE_SYCL)
+ #include "src/Core/arch/SYCL/PacketMath.h"
+ #include "src/Core/arch/SYCL/MathFunctions.h"
+ #include "src/Core/arch/SYCL/TypeCasting.h"
+#endif
#endif
#include "src/Core/arch/Default/Settings.h"
+// This file provides generic implementations valid for scalar as well
+#include "src/Core/arch/Default/GenericPacketMathFunctions.h"
#include "src/Core/functors/TernaryFunctors.h"
#include "src/Core/functors/BinaryFunctors.h"
@@ -450,11 +259,16 @@ using std::ptrdiff_t;
// Specialized functors to enable the processing of complex numbers
// on CUDA devices
+#ifdef EIGEN_CUDACC
#include "src/Core/arch/CUDA/Complex.h"
+#endif
#include "src/Core/util/IndexedViewHelper.h"
+#include "src/Core/util/ReshapedHelper.h"
#include "src/Core/ArithmeticSequence.h"
-#include "src/Core/IO.h"
+#ifndef EIGEN_NO_IO
+ #include "src/Core/IO.h"
+#endif
#include "src/Core/DenseCoeffsBase.h"
#include "src/Core/DenseBase.h"
#include "src/Core/MatrixBase.h"
@@ -496,6 +310,7 @@ using std::ptrdiff_t;
#include "src/Core/Block.h"
#include "src/Core/VectorBlock.h"
#include "src/Core/IndexedView.h"
+#include "src/Core/Reshaped.h"
#include "src/Core/Transpose.h"
#include "src/Core/DiagonalMatrix.h"
#include "src/Core/Diagonal.h"
@@ -532,13 +347,21 @@ using std::ptrdiff_t;
#include "src/Core/CoreIterators.h"
#include "src/Core/ConditionEstimator.h"
+#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
+ #include "src/Core/arch/AltiVec/MatrixProduct.h"
+#elif defined EIGEN_VECTORIZE_NEON
+ #include "src/Core/arch/NEON/GeneralBlockPanelKernel.h"
+#endif
+
#include "src/Core/BooleanRedux.h"
#include "src/Core/Select.h"
#include "src/Core/VectorwiseOp.h"
+#include "src/Core/PartialReduxEvaluator.h"
#include "src/Core/Random.h"
#include "src/Core/Replicate.h"
#include "src/Core/Reverse.h"
#include "src/Core/ArrayWrapper.h"
+#include "src/Core/StlIterators.h"
#ifdef EIGEN_USE_BLAS
#include "src/Core/products/GeneralMatrixMatrix_BLAS.h"
diff --git a/examples/ThirdPartyLibs/Eigen/Eigenvalues b/examples/ThirdPartyLibs/Eigen/Eigenvalues
index f3f661b07..5467a2e7b 100644
--- a/examples/ThirdPartyLibs/Eigen/Eigenvalues
+++ b/examples/ThirdPartyLibs/Eigen/Eigenvalues
@@ -10,14 +10,14 @@
#include "Core"
-#include "src/Core/util/DisableStupidWarnings.h"
-
#include "Cholesky"
#include "Jacobi"
#include "Householder"
#include "LU"
#include "Geometry"
+#include "src/Core/util/DisableStupidWarnings.h"
+
/** \defgroup Eigenvalues_Module Eigenvalues module
*
*
@@ -58,4 +58,3 @@
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_EIGENVALUES_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
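
For reference, a small self-contained use of the Eigenvalues module whose include order changes above (standard Eigen API; sketch only):

    // Eigendecomposition of a symmetric matrix.
    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main() {
      Eigen::Matrix2d A;
      A << 2, 1,
           1, 2;
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
      if (es.info() == Eigen::Success)
        std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";  // 1 and 3
      return 0;
    }
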
diff --git a/examples/ThirdPartyLibs/Eigen/Geometry b/examples/ThirdPartyLibs/Eigen/Geometry
index 131a4edfc..bc78110a8 100644
--- a/examples/ThirdPartyLibs/Eigen/Geometry
+++ b/examples/ThirdPartyLibs/Eigen/Geometry
@@ -10,12 +10,12 @@
#include "Core"
-#include "src/Core/util/DisableStupidWarnings.h"
-
#include "SVD"
#include "LU"
#include <limits>
+#include "src/Core/util/DisableStupidWarnings.h"
+
/** \defgroup Geometry_Module Geometry module
*
* This module provides support for:
@@ -49,13 +49,11 @@
#include "src/Geometry/AlignedBox.h"
#include "src/Geometry/Umeyama.h"
-// Use the SSE optimized version whenever possible. At the moment the
-// SSE version doesn't compile when AVX is enabled
-#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
-#include "src/Geometry/arch/Geometry_SSE.h"
+// Use the SSE optimized version whenever possible.
+#if (defined EIGEN_VECTORIZE_SSE) || (defined EIGEN_VECTORIZE_NEON)
+#include "src/Geometry/arch/Geometry_SIMD.h"
#endif
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_GEOMETRY_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
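
The Geometry change above routes both SSE and NEON through the new Geometry_SIMD.h. A sketch of the kind of operation that kernel accelerates, quaternion composition (standard Eigen API):

    #include <Eigen/Geometry>
    #include <iostream>

    int main() {
      Eigen::Quaternionf qa(Eigen::AngleAxisf(0.50f, Eigen::Vector3f::UnitX()));
      Eigen::Quaternionf qb(Eigen::AngleAxisf(0.25f, Eigen::Vector3f::UnitY()));
      Eigen::Quaternionf qc = qa * qb;  // hits the SIMD product when vectorization is enabled
      std::cout << qc.coeffs().transpose() << "\n";  // coefficients in x y z w order
      return 0;
    }
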
diff --git a/examples/ThirdPartyLibs/Eigen/Householder b/examples/ThirdPartyLibs/Eigen/Householder
index 89cd81b1a..f2fa79969 100644
--- a/examples/ThirdPartyLibs/Eigen/Householder
+++ b/examples/ThirdPartyLibs/Eigen/Householder
@@ -27,4 +27,3 @@
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_HOUSEHOLDER_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/examples/ThirdPartyLibs/Eigen/Jacobi b/examples/ThirdPartyLibs/Eigen/Jacobi
index 17c1d785a..43edc7a19 100644
--- a/examples/ThirdPartyLibs/Eigen/Jacobi
+++ b/examples/ThirdPartyLibs/Eigen/Jacobi
@@ -29,5 +29,4 @@
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_JACOBI_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/examples/ThirdPartyLibs/Eigen/KLUSupport b/examples/ThirdPartyLibs/Eigen/KLUSupport
new file mode 100644
index 000000000..b23d90535
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/KLUSupport
@@ -0,0 +1,41 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_KLUSUPPORT_MODULE_H
+#define EIGEN_KLUSUPPORT_MODULE_H
+
+#include <Eigen/SparseCore>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+extern "C" {
+#include <btf.h>
+#include <klu.h>
+ }
+
+/** \ingroup Support_modules
+ * \defgroup KLUSupport_Module KLUSupport module
+ *
+ * This module provides an interface to the KLU library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
+ * It provides the following factorization class:
+ * - class KLU: a sparse LU factorization, well-suited for circuit simulation.
+ *
+ * \code
+ * #include <Eigen/KLUSupport>
+ * \endcode
+ *
+ * In order to use this module, the klu and btf headers must be accessible from the include paths, and your binary must be linked to the klu library and its dependencies.
+ * The dependencies depend on how klu has been compiled.
+ * For a cmake based project, you can use our FindKLU.cmake module to help you in this task.
+ *
+ */
+
+#include "src/KLUSupport/KLUSupport.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_KLUSUPPORT_MODULE_H
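
The module documentation above translates to usage along these lines; a hedged sketch, since it only builds where SuiteSparse's klu/btf headers and libraries are actually installed:

    #include <Eigen/SparseCore>
    #include <Eigen/KLUSupport>   // needs klu.h/btf.h on the include path and -lklu at link time

    int main() {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 4.0;
      A.insert(1, 1) = 2.0;
      A.makeCompressed();
      Eigen::VectorXd b(2);
      b << 1.0, 1.0;
      Eigen::KLU<Eigen::SparseMatrix<double> > solver;
      solver.compute(A);                    // analyze + factorize
      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }
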
diff --git a/examples/ThirdPartyLibs/Eigen/LICENSE.txt b/examples/ThirdPartyLibs/Eigen/LICENSE.txt
deleted file mode 100644
index c6b8841d9..000000000
--- a/examples/ThirdPartyLibs/Eigen/LICENSE.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/examples/ThirdPartyLibs/Eigen/LU b/examples/ThirdPartyLibs/Eigen/LU
index 6418a86e1..1236ceb04 100644
--- a/examples/ThirdPartyLibs/Eigen/LU
+++ b/examples/ThirdPartyLibs/Eigen/LU
@@ -38,13 +38,10 @@
#include "src/LU/Determinant.h"
#include "src/LU/InverseImpl.h"
-// Use the SSE optimized version whenever possible. At the moment the
-// SSE version doesn't compile when AVX is enabled
-#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
- #include "src/LU/arch/Inverse_SSE.h"
+#if defined EIGEN_VECTORIZE_SSE || defined EIGEN_VECTORIZE_NEON
+ #include "src/LU/arch/InverseSize4.h"
#endif
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_LU_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
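
The LU hunk swaps the SSE-only Inverse_SSE.h for InverseSize4.h, which also covers NEON. The fast path it implements is the fixed-size 4x4 inverse; a sketch:

    #include <Eigen/LU>   // inverse(); pulls in Eigen/Core
    #include <iostream>

    int main() {
      Eigen::Matrix4f M = Eigen::Matrix4f::Random();
      M.diagonal().array() += 4.0f;            // keep M comfortably invertible
      Eigen::Matrix4f Minv = M.inverse();      // dispatches to the size-4 SIMD kernel when enabled
      std::cout << (M * Minv - Eigen::Matrix4f::Identity()).norm() << "\n";  // ~0
      return 0;
    }
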
diff --git a/examples/ThirdPartyLibs/Eigen/OrderingMethods b/examples/ThirdPartyLibs/Eigen/OrderingMethods
index d8ea36193..29691a62b 100644
--- a/examples/ThirdPartyLibs/Eigen/OrderingMethods
+++ b/examples/ThirdPartyLibs/Eigen/OrderingMethods
@@ -63,10 +63,7 @@
* \endcode
*/
-#ifndef EIGEN_MPL2_ONLY
#include "src/OrderingMethods/Amd.h"
-#endif
-
#include "src/OrderingMethods/Ordering.h"
#include "src/Core/util/ReenableStupidWarnings.h"
diff --git a/examples/ThirdPartyLibs/Eigen/PaStiXSupport b/examples/ThirdPartyLibs/Eigen/PaStiXSupport
index de3a63b4d..234619acc 100644
--- a/examples/ThirdPartyLibs/Eigen/PaStiXSupport
+++ b/examples/ThirdPartyLibs/Eigen/PaStiXSupport
@@ -36,6 +36,7 @@ extern "C" {
* \endcode
*
* In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies.
+ * This wrapper requires PaStiX version 5.x compiled without MPI support.
* The dependencies depend on how PaSTiX has been compiled.
* For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task.
*
diff --git a/examples/ThirdPartyLibs/Eigen/QR b/examples/ThirdPartyLibs/Eigen/QR
index c7e914469..8465b62ce 100644
--- a/examples/ThirdPartyLibs/Eigen/QR
+++ b/examples/ThirdPartyLibs/Eigen/QR
@@ -10,12 +10,12 @@
#include "Core"
-#include "src/Core/util/DisableStupidWarnings.h"
-
#include "Cholesky"
#include "Jacobi"
#include "Householder"
+#include "src/Core/util/DisableStupidWarnings.h"
+
/** \defgroup QR_Module QR module
*
*
@@ -48,4 +48,3 @@
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_QR_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
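
A compact use of the QR module whose include order changes above: least-squares via rank-revealing QR (standard API; sketch only):

    #include <Eigen/QR>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);   // overdetermined system
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);
      Eigen::VectorXd x = A.colPivHouseholderQr().solve(b);
      std::cout << "residual: " << (A * x - b).norm() << "\n";
      return 0;
    }
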
diff --git a/examples/ThirdPartyLibs/Eigen/QtAlignedMalloc b/examples/ThirdPartyLibs/Eigen/QtAlignedMalloc
index 4f07df02a..6fe82374a 100644
--- a/examples/ThirdPartyLibs/Eigen/QtAlignedMalloc
+++ b/examples/ThirdPartyLibs/Eigen/QtAlignedMalloc
@@ -37,4 +37,3 @@ void *qRealloc(void *ptr, std::size_t size)
#endif
#endif // EIGEN_QTMALLOC_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/examples/ThirdPartyLibs/Eigen/SVD b/examples/ThirdPartyLibs/Eigen/SVD
index 5d0e75f7f..345179496 100644
--- a/examples/ThirdPartyLibs/Eigen/SVD
+++ b/examples/ThirdPartyLibs/Eigen/SVD
@@ -48,4 +48,3 @@
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SVD_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
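
For reference, a minimal SVD module example; BDCSVD is the divide-and-conquer path this update reworks in src/SVD/BDCSVD.h:

    #include <Eigen/SVD>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
      return 0;
    }
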
diff --git a/examples/ThirdPartyLibs/Eigen/Sparse b/examples/ThirdPartyLibs/Eigen/Sparse
index 136e681a1..a2ef7a665 100644
--- a/examples/ThirdPartyLibs/Eigen/Sparse
+++ b/examples/ThirdPartyLibs/Eigen/Sparse
@@ -25,9 +25,7 @@
#include "SparseCore"
#include "OrderingMethods"
-#ifndef EIGEN_MPL2_ONLY
#include "SparseCholesky"
-#endif
#include "SparseLU"
#include "SparseQR"
#include "IterativeLinearSolvers"
diff --git a/examples/ThirdPartyLibs/Eigen/SparseCholesky b/examples/ThirdPartyLibs/Eigen/SparseCholesky
index b6a320c40..d2b1f1276 100644
--- a/examples/ThirdPartyLibs/Eigen/SparseCholesky
+++ b/examples/ThirdPartyLibs/Eigen/SparseCholesky
@@ -30,16 +30,8 @@
* \endcode
*/
-#ifdef EIGEN_MPL2_ONLY
-#error The SparseCholesky module has nothing to offer in MPL2 only mode
-#endif
-
#include "src/SparseCholesky/SimplicialCholesky.h"
-
-#ifndef EIGEN_MPL2_ONLY
#include "src/SparseCholesky/SimplicialCholesky_impl.h"
-#endif
-
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SPARSECHOLESKY_MODULE_H
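
With the module now unconditional, a sketch of the simplicial Cholesky solver it provides (only the lower triangle of the SPD matrix is stored; standard API):

    #include <Eigen/SparseCore>
    #include <Eigen/SparseCholesky>

    int main() {
      Eigen::SparseMatrix<double> A(2, 2);     // lower triangle of [[4,1],[1,3]]
      A.insert(0, 0) = 4.0;
      A.insert(1, 0) = 1.0;
      A.insert(1, 1) = 3.0;
      A.makeCompressed();
      Eigen::VectorXd b(2);
      b << 1.0, 2.0;
      Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver(A);  // reads the lower part
      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }
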
diff --git a/examples/ThirdPartyLibs/Eigen/SparseLU b/examples/ThirdPartyLibs/Eigen/SparseLU
index 38b38b531..37c4a5c5a 100644
--- a/examples/ThirdPartyLibs/Eigen/SparseLU
+++ b/examples/ThirdPartyLibs/Eigen/SparseLU
@@ -23,6 +23,8 @@
// Ordering interface
#include "OrderingMethods"
+#include "src/Core/util/DisableStupidWarnings.h"
+
#include "src/SparseLU/SparseLU_gemm_kernel.h"
#include "src/SparseLU/SparseLU_Structs.h"
@@ -43,4 +45,6 @@
#include "src/SparseLU/SparseLU_Utils.h"
#include "src/SparseLU/SparseLU.h"
+#include "src/Core/util/ReenableStupidWarnings.h"
+
#endif // EIGEN_SPARSELU_MODULE_H
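
The SparseLU module now wraps itself in the warning guards added above; its use is unchanged. A sketch:

    #include <Eigen/SparseCore>
    #include <Eigen/SparseLU>

    int main() {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 2.0;
      A.insert(0, 1) = 1.0;
      A.insert(1, 1) = 3.0;
      A.makeCompressed();
      Eigen::VectorXd b(2);
      b << 3.0, 3.0;
      Eigen::SparseLU<Eigen::SparseMatrix<double> > solver;
      solver.analyzePattern(A);
      solver.factorize(A);
      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }
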
diff --git a/examples/ThirdPartyLibs/Eigen/SparseQR b/examples/ThirdPartyLibs/Eigen/SparseQR
index a6f3b7f7d..f5fc5fa7f 100644
--- a/examples/ThirdPartyLibs/Eigen/SparseQR
+++ b/examples/ThirdPartyLibs/Eigen/SparseQR
@@ -28,7 +28,6 @@
*
*/
-#include "OrderingMethods"
#include "src/SparseCore/SparseColEtree.h"
#include "src/SparseQR/SparseQR.h"
diff --git a/examples/ThirdPartyLibs/Eigen/src/Cholesky/LDLT.h b/examples/ThirdPartyLibs/Eigen/src/Cholesky/LDLT.h
index 968427b3a..1013ca045 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Cholesky/LDLT.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Cholesky/LDLT.h
@@ -16,6 +16,15 @@
namespace Eigen {
namespace internal {
+ template<typename _MatrixType, int _UpLo> struct traits<LDLT<_MatrixType, _UpLo> >
+ : traits<_MatrixType>
+ {
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+ };
+
template<typename MatrixType, int UpLo> struct LDLT_Traits;
// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
@@ -36,7 +45,7 @@ namespace internal {
* matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L
* is lower triangular with a unit diagonal and D is a diagonal matrix.
*
- * The decomposition uses pivoting to ensure stability, so that L will have
+ * The decomposition uses pivoting to ensure stability, so that D will have
* zeros in the bottom right rank(A) - n submatrix. Avoiding the square root
* on D also stabilizes the computation.
*
@@ -44,24 +53,23 @@ namespace internal {
* decomposition to determine whether a system of equations has a solution.
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
- *
+ *
* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT
*/
template<typename _MatrixType, int _UpLo> class LDLT
+ : public SolverBase<LDLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<LDLT> Base;
+ friend class SolverBase<LDLT>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(LDLT)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
UpLo = _UpLo
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
@@ -180,6 +188,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
@@ -191,19 +200,14 @@ template<typename _MatrixType, int _UpLo> class LDLT
* \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
* \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
* least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
- * computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular.
+ * computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
*
* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt()
*/
template<typename Rhs>
inline const Solve<LDLT, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "LDLT is not initialized.");
- eigen_assert(m_matrix.rows()==b.rows()
- && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
- return Solve<LDLT, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
template<typename Derived>
bool solveInPlace(MatrixBase<Derived> &bAndX) const;
@@ -242,12 +246,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
*/
const LDLT& adjoint() const { return *this; };
- inline Index rows() const { return m_matrix.rows(); }
- inline Index cols() const { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the factorization failed because of a zero pivot.
*/
ComputationInfo info() const
@@ -259,6 +263,9 @@ template<typename _MatrixType, int _UpLo> class LDLT
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -304,7 +311,8 @@ template<> struct ldlt_inplace<Lower>
if (size <= 1)
{
transpositions.setIdentity();
- if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef;
+ if(size==0) sign = ZeroSign;
+ else if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef;
else if (numext::real(mat.coeff(0,0)) < static_cast<RealScalar>(0)) sign = NegativeSemiDef;
else sign = ZeroSign;
return true;
@@ -375,6 +383,8 @@ template<> struct ldlt_inplace<Lower>
if((rs>0) && pivot_is_valid)
A21 /= realAkk;
+ else if(rs>0)
+ ret = ret && (A21.array()==Scalar(0)).all();
if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed
else if(!pivot_is_valid) found_zero_pivot = true;
@@ -556,25 +566,33 @@ template<typename _MatrixType, int _UpLo>
template<typename RhsType, typename DstType>
void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
+ _solve_impl_transposed<true>(rhs, dst);
+}
+
+template<typename _MatrixType,int _UpLo>
+template<bool Conjugate, typename RhsType, typename DstType>
+void LDLT<_MatrixType,_UpLo>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
// dst = P b
dst = m_transpositions * rhs;
// dst = L^-1 (P b)
- matrixL().solveInPlace(dst);
+ // dst = L^-*T (P b)
+ matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
- // dst = D^-1 (L^-1 P b)
+ // dst = D^-* (L^-1 P b)
+ // dst = D^-1 (L^-*T P b)
// more precisely, use pseudo-inverse of D (see bug 241)
using std::abs;
const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
- // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
- // as motivated by LAPACK's xGELSS:
+ // In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min())
+ // and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS:
// RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
// However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
// diagonal element is not well justified and leads to numerical issues in some cases.
// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
- RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
-
+ // Using numeric_limits::min() gives us more robustness to denormals.
+ RealScalar tolerance = (std::numeric_limits<RealScalar>::min)();
for (Index i = 0; i < vecD.size(); ++i)
{
if(abs(vecD(i)) > tolerance)
@@ -583,10 +601,12 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
dst.row(i).setZero();
}
- // dst = L^-T (D^-1 L^-1 P b)
- matrixU().solveInPlace(dst);
+ // dst = L^-* (D^-* L^-1 P b)
+ // dst = L^-T (D^-1 L^-*T P b)
+ matrixL().transpose().template conjugateIf<Conjugate>().solveInPlace(dst);
- // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
+ // dst = P^T (L^-* D^-* L^-1 P b) = A^-1 b
+ // dst = P^-T (L^-T D^-1 L^-*T P b) = A^-1 b
dst = m_transpositions.transpose() * dst;
}
#endif
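In public-API terms, the solve pipeline implemented above corresponds roughly to the sketch below. The helper name is illustrative and only the Conjugate == true (non-transposed) path is shown:

#include <Eigen/Cholesky>
#include <cmath>
#include <limits>

Eigen::VectorXd solveViaLDLT(const Eigen::LDLT<Eigen::MatrixXd>& ldlt,
                             const Eigen::VectorXd& b) {
  Eigen::VectorXd y = ldlt.transpositionsP() * b;            // y = P b
  ldlt.matrixL().solveInPlace(y);                            // y = L^-1 y
  const double tol = (std::numeric_limits<double>::min)();   // robust to denormals
  Eigen::VectorXd d = ldlt.vectorD();
  for (Eigen::Index i = 0; i < d.size(); ++i)                // pseudo-inverse of D
    y(i) = (std::abs(d(i)) > tol) ? y(i) / d(i) : 0.0;
  ldlt.matrixU().solveInPlace(y);                            // y = L^-* y
  return ldlt.transpositionsP().transpose() * y;             // x = P^T y
}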
diff --git a/examples/ThirdPartyLibs/Eigen/src/Cholesky/LLT.h b/examples/ThirdPartyLibs/Eigen/src/Cholesky/LLT.h
index 814174d47..8c9b2b398 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Cholesky/LLT.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Cholesky/LLT.h
@@ -13,6 +13,16 @@
namespace Eigen {
namespace internal{
+
+template<typename _MatrixType, int _UpLo> struct traits<LLT<_MatrixType, _UpLo> >
+ : traits<_MatrixType>
+{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+};
+
template<typename MatrixType, int UpLo> struct LLT_Traits;
}
@@ -54,18 +64,17 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
* \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
*/
template<typename _MatrixType, int _UpLo> class LLT
+ : public SolverBase<LLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<LLT> Base;
+ friend class SolverBase<LLT>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(LLT)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
- typedef typename MatrixType::StorageIndex StorageIndex;
enum {
PacketSize = internal::packet_traits<Scalar>::size,
@@ -100,7 +109,7 @@ template<typename _MatrixType, int _UpLo> class LLT
compute(matrix.derived());
}
- /** \brief Constructs a LDLT factorization from a given matrix
+ /** \brief Constructs a LLT factorization from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when
* \c MatrixType is a Eigen::Ref.
@@ -129,6 +138,7 @@ template<typename _MatrixType, int _UpLo> class LLT
return Traits::getL(m_matrix);
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* Since this LLT class assumes anyway that the matrix A is invertible, the solution
@@ -141,13 +151,8 @@ template<typename _MatrixType, int _UpLo> class LLT
*/
template<typename Rhs>
inline const Solve<LLT, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "LLT is not initialized.");
- eigen_assert(m_matrix.rows()==b.rows()
- && "LLT::solve(): invalid number of rows of the right hand side matrix b");
- return Solve<LLT, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
template<typename Derived>
void solveInPlace(const MatrixBase<Derived> &bAndX) const;
@@ -180,7 +185,7 @@ template<typename _MatrixType, int _UpLo> class LLT
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears not to be positive definite.
*/
ComputationInfo info() const
@@ -194,17 +199,20 @@ template<typename _MatrixType, int _UpLo> class LLT
* This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
* \code x = decomposition.adjoint().solve(b) \endcode
*/
- const LLT& adjoint() const { return *this; };
+ const LLT& adjoint() const EIGEN_NOEXCEPT { return *this; };
- inline Index rows() const { return m_matrix.rows(); }
- inline Index cols() const { return m_matrix.cols(); }
+ inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+ inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
template<typename VectorType>
- LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
+ LLT & rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -458,7 +466,7 @@ LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>
*/
template<typename _MatrixType, int _UpLo>
template<typename VectorType>
-LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)
+LLT<_MatrixType,_UpLo> & LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);
eigen_assert(v.size()==m_matrix.cols());
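With rankUpdate now returning a reference rather than a copy, calls can be chained. A minimal sketch (matrix and vectors are illustrative):

#include <Eigen/Cholesky>

int main() {
  Eigen::Matrix2d A;
  A << 4, 1,
       1, 3;
  Eigen::Vector2d v(1, 0), b(1, 2);
  Eigen::LLT<Eigen::Matrix2d> llt(A);
  // Chaining is possible because rankUpdate returns LLT&:
  Eigen::Vector2d x = llt.rankUpdate(v, 1.0).solve(b);  // solves (A + v v^T) x = b
  return 0;
}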
@@ -476,8 +484,17 @@ template<typename _MatrixType,int _UpLo>
template<typename RhsType, typename DstType>
void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- dst = rhs;
- solveInPlace(dst);
+ _solve_impl_transposed<true>(rhs, dst);
+}
+
+template<typename _MatrixType,int _UpLo>
+template<bool Conjugate, typename RhsType, typename DstType>
+void LLT<_MatrixType,_UpLo>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ dst = rhs;
+
+ matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
+ matrixU().template conjugateIf<!Conjugate>().solveInPlace(dst);
}
#endif
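The non-transposed path above amounts to two triangular solves through LLT's public accessors; a rough equivalent (helper name illustrative):

#include <Eigen/Cholesky>

Eigen::VectorXd solveViaLLT(const Eigen::LLT<Eigen::MatrixXd>& llt,
                            const Eigen::VectorXd& b) {
  Eigen::VectorXd x = b;
  llt.matrixL().solveInPlace(x);  // forward substitution:  L y = b
  llt.matrixU().solveInPlace(x);  // backward substitution: L^* x = y
  return x;
}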
diff --git a/examples/ThirdPartyLibs/Eigen/src/CholmodSupport/CholmodSupport.h b/examples/ThirdPartyLibs/Eigen/src/CholmodSupport/CholmodSupport.h
index dc199ece6..adaf52858 100644
--- a/examples/ThirdPartyLibs/Eigen/src/CholmodSupport/CholmodSupport.h
+++ b/examples/ThirdPartyLibs/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_CHOLMODSUPPORT_H
#define EIGEN_CHOLMODSUPPORT_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -79,12 +79,12 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
res.dtype = 0;
res.stype = -1;
-
+
if (internal::is_same<_StorageIndex,int>::value)
{
res.itype = CHOLMOD_INT;
}
- else if (internal::is_same<_StorageIndex,long>::value)
+ else if (internal::is_same<_StorageIndex,SuiteSparse_long>::value)
{
res.itype = CHOLMOD_LONG;
}
@@ -95,9 +95,9 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
// setup res.xtype
internal::cholmod_configure_matrix<_Scalar>::run(res);
-
+
res.stype = 0;
-
+
return res;
}
@@ -121,7 +121,7 @@ template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
{
cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived()));
-
+
if(UpLo==Upper) res.stype = 1;
if(UpLo==Lower) res.stype = -1;
// swap stype for rowmajor matrices (only works for real matrices)
@@ -168,11 +168,11 @@ namespace internal {
#define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \
template<typename _StorageIndex> inline ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \
- template<> inline ret cm_ ## name<long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
+ template<> inline ret cm_ ## name<SuiteSparse_long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
#define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \
template<typename _StorageIndex> inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \
- template<> inline ret cm_ ## name<long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
+ template<> inline ret cm_ ## name<SuiteSparse_long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
EIGEN_CHOLMOD_SPECIALIZE0(int, start)
EIGEN_CHOLMOD_SPECIALIZE0(int, finish)
@@ -184,15 +184,15 @@ EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A)
EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A)
template<typename _StorageIndex> inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); }
-template<> inline cholmod_dense* cm_solve<long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
+template<> inline cholmod_dense* cm_solve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
template<typename _StorageIndex> inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); }
-template<> inline cholmod_sparse* cm_spsolve<long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
+template<> inline cholmod_sparse* cm_spsolve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
template<typename _StorageIndex>
inline int cm_factorize_p (cholmod_sparse* A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); }
template<>
-inline int cm_factorize_p<long> (cholmod_sparse* A, double beta[2], long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
+inline int cm_factorize_p<SuiteSparse_long> (cholmod_sparse* A, double beta[2], SuiteSparse_long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
#undef EIGEN_CHOLMOD_SPECIALIZE0
#undef EIGEN_CHOLMOD_SPECIALIZE1
@@ -254,10 +254,10 @@ class CholmodBase : public SparseSolverBase<Derived>
internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);
internal::cm_finish<StorageIndex>(m_cholmod);
}
-
+
inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
-
+
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
@@ -276,11 +276,11 @@ class CholmodBase : public SparseSolverBase<Derived>
factorize(matrix);
return derived();
}
-
+
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
- *
+ *
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
@@ -292,13 +292,13 @@ class CholmodBase : public SparseSolverBase<Derived>
}
cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
m_cholmodFactor = internal::cm_analyze<StorageIndex>(A, m_cholmod);
-
+
this->m_isInitialized = true;
this->m_info = Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
}
-
+
/** Performs a numeric decomposition of \a matrix
*
* The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
@@ -315,11 +315,11 @@ class CholmodBase : public SparseSolverBase<Derived>
this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
m_factorizationIsOk = true;
}
-
+
/** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations.
* See the Cholmod user guide for details. */
cholmod_common& cholmod() { return m_cholmod; }
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal */
template<typename Rhs,typename Dest>
@@ -329,7 +329,7 @@ class CholmodBase : public SparseSolverBase<Derived>
const Index size = m_cholmodFactor->n;
EIGEN_UNUSED_VARIABLE(size);
eigen_assert(size==b.rows());
-
+
// Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref.
Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived());
@@ -345,7 +345,7 @@ class CholmodBase : public SparseSolverBase<Derived>
dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
internal::cm_free_dense<StorageIndex>(x_cd, m_cholmod);
}
-
+
/** \internal */
template<typename RhsDerived, typename DestDerived>
void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const
@@ -370,8 +370,8 @@ class CholmodBase : public SparseSolverBase<Derived>
internal::cm_free_sparse<StorageIndex>(x_cs, m_cholmod);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
-
-
+
+
/** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
*
* During the numerical factorization, an offset term is added to the diagonal coefficients:\n
@@ -386,7 +386,7 @@ class CholmodBase : public SparseSolverBase<Derived>
m_shiftOffset[0] = double(offset);
return derived();
}
-
+
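A usage sketch of the shift described above, assuming CHOLMOD is available to link against (pattern and value are illustrative):

#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

void factorizeShifted(const Eigen::SparseMatrix<double>& A) {
  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> > solver;
  solver.setShift(1e-8);   // factorizes A + 1e-8 * I instead of A
  solver.compute(A);
}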
/** \returns the determinant of the underlying matrix from the current factorization */
Scalar determinant() const
{
@@ -441,7 +441,7 @@ class CholmodBase : public SparseSolverBase<Derived>
template<typename Stream>
void dumpMemory(Stream& /*s*/)
{}
-
+
protected:
mutable cholmod_common m_cholmod;
cholmod_factor* m_cholmodFactor;
@@ -478,11 +478,11 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodSimplicialLLT() : Base() { init(); }
CholmodSimplicialLLT(const MatrixType& matrix) : Base()
@@ -529,11 +529,11 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodSimplicialLDLT() : Base() { init(); }
CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
@@ -578,11 +578,11 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodSupernodalLLT() : Base() { init(); }
CholmodSupernodalLLT(const MatrixType& matrix) : Base()
@@ -629,11 +629,11 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
{
typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
using Base::m_cholmod;
-
+
public:
-
+
typedef _MatrixType MatrixType;
-
+
CholmodDecomposition() : Base() { init(); }
CholmodDecomposition(const MatrixType& matrix) : Base()
@@ -643,7 +643,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
}
~CholmodDecomposition() {}
-
+
void setMode(CholmodMode mode)
{
switch(mode)
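setMode selects which CHOLMOD backend the switch above dispatches to; a minimal sketch, with the enum value chosen purely for illustration:

#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

void pickSupernodal(Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> >& solver) {
  // Force the supernodal LL^T backend instead of letting CHOLMOD choose (CholmodAuto):
  solver.setMode(Eigen::CholmodSupernodalLLt);
}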
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ArithmeticSequence.h b/examples/ThirdPartyLibs/Eigen/src/Core/ArithmeticSequence.h
index ada1571f1..b6200fac1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ArithmeticSequence.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ArithmeticSequence.h
@@ -29,17 +29,17 @@ template<int N> struct aseq_negate<FixedInt<N> > {
template<> struct aseq_negate<FixedInt<DynamicIndex> > {};
template<typename FirstType,typename SizeType,typename IncrType,
- bool FirstIsSymbolic=Symbolic::is_symbolic<FirstType>::value,
- bool SizeIsSymbolic =Symbolic::is_symbolic<SizeType>::value>
+ bool FirstIsSymbolic=symbolic::is_symbolic<FirstType>::value,
+ bool SizeIsSymbolic =symbolic::is_symbolic<SizeType>::value>
struct aseq_reverse_first_type {
typedef Index type;
};
template<typename FirstType,typename SizeType,typename IncrType>
struct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,true> {
- typedef Symbolic::AddExpr<FirstType,
- Symbolic::ProductExpr<Symbolic::AddExpr<SizeType,Symbolic::ValueExpr<FixedInt<-1> > >,
- Symbolic::ValueExpr<IncrType> >
+ typedef symbolic::AddExpr<FirstType,
+ symbolic::ProductExpr<symbolic::AddExpr<SizeType,symbolic::ValueExpr<FixedInt<-1> > >,
+ symbolic::ValueExpr<IncrType> >
> type;
};
@@ -56,14 +56,14 @@ struct aseq_reverse_first_type_aux<SizeType,IncrType,typename internal::enable_i
template<typename FirstType,typename SizeType,typename IncrType>
struct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,false> {
typedef typename aseq_reverse_first_type_aux<SizeType,IncrType>::type Aux;
- typedef Symbolic::AddExpr<FirstType,Symbolic::ValueExpr<Aux> > type;
+ typedef symbolic::AddExpr<FirstType,symbolic::ValueExpr<Aux> > type;
};
template<typename FirstType,typename SizeType,typename IncrType>
struct aseq_reverse_first_type<FirstType,SizeType,IncrType,false,true> {
- typedef Symbolic::AddExpr<Symbolic::ProductExpr<Symbolic::AddExpr<SizeType,Symbolic::ValueExpr<FixedInt<-1> > >,
- Symbolic::ValueExpr<IncrType> >,
- Symbolic::ValueExpr<> > type;
+ typedef symbolic::AddExpr<symbolic::ProductExpr<symbolic::AddExpr<SizeType,symbolic::ValueExpr<FixedInt<-1> > >,
+ symbolic::ValueExpr<IncrType> >,
+ symbolic::ValueExpr<> > type;
};
#endif
@@ -225,10 +225,11 @@ auto seq(FirstType f, LastType l, IncrType incr)
-typename internal::cleanup_index_type<FirstType>::type(f)+CleanedIncrType(incr)) / CleanedIncrType(incr),
CleanedIncrType(incr));
}
-#else
+
+#else // EIGEN_HAS_CXX11
template<typename FirstType,typename LastType>
-typename internal::enable_if<!(Symbolic::is_symbolic<FirstType>::value || Symbolic::is_symbolic<LastType>::value),
+typename internal::enable_if<!(symbolic::is_symbolic<FirstType>::value || symbolic::is_symbolic<LastType>::value),
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,Index> >::type
seq(FirstType f, LastType l)
{
@@ -237,35 +238,35 @@ seq(FirstType f, LastType l)
}
template<typename FirstTypeDerived,typename LastType>
-typename internal::enable_if<!Symbolic::is_symbolic<LastType>::value,
- ArithmeticSequence<FirstTypeDerived, Symbolic::AddExpr<Symbolic::AddExpr<Symbolic::NegateExpr<FirstTypeDerived>,Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, LastType l)
+typename internal::enable_if<!symbolic::is_symbolic<LastType>::value,
+ ArithmeticSequence<FirstTypeDerived, symbolic::AddExpr<symbolic::AddExpr<symbolic::NegateExpr<FirstTypeDerived>,symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, LastType l)
{
return seqN(f.derived(),(typename internal::cleanup_index_type<LastType>::type(l)-f.derived()+fix<1>()));
}
template<typename FirstType,typename LastTypeDerived>
-typename internal::enable_if<!Symbolic::is_symbolic<FirstType>::value,
+typename internal::enable_if<!symbolic::is_symbolic<FirstType>::value,
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
- Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
-seq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l)
+ symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<internal::FixedInt<1> > > > >::type
+seq(FirstType f, const symbolic::BaseExpr<LastTypeDerived> &l)
{
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),(l.derived()-typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>()));
}
template<typename FirstTypeDerived,typename LastTypeDerived>
ArithmeticSequence<FirstTypeDerived,
- Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::NegateExpr<FirstTypeDerived> >,Symbolic::ValueExpr<internal::FixedInt<1> > > >
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, const Symbolic::BaseExpr<LastTypeDerived> &l)
+ symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,symbolic::NegateExpr<FirstTypeDerived> >,symbolic::ValueExpr<internal::FixedInt<1> > > >
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, const symbolic::BaseExpr<LastTypeDerived> &l)
{
return seqN(f.derived(),(l.derived()-f.derived()+fix<1>()));
}
template<typename FirstType,typename LastType, typename IncrType>
-typename internal::enable_if<!(Symbolic::is_symbolic<FirstType>::value || Symbolic::is_symbolic<LastType>::value),
+typename internal::enable_if<!(symbolic::is_symbolic<FirstType>::value || symbolic::is_symbolic<LastType>::value),
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,Index,typename internal::cleanup_seq_incr<IncrType>::type> >::type
seq(FirstType f, LastType l, IncrType incr)
{
@@ -275,27 +276,27 @@ seq(FirstType f, LastType l, IncrType incr)
}
template<typename FirstTypeDerived,typename LastType, typename IncrType>
-typename internal::enable_if<!Symbolic::is_symbolic<LastType>::value,
+typename internal::enable_if<!symbolic::is_symbolic<LastType>::value,
ArithmeticSequence<FirstTypeDerived,
- Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<Symbolic::NegateExpr<FirstTypeDerived>,
- Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::QuotientExpr<symbolic::AddExpr<symbolic::AddExpr<symbolic::NegateExpr<FirstTypeDerived>,
+ symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
typename internal::cleanup_seq_incr<IncrType>::type> >::type
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, LastType l, IncrType incr)
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, LastType l, IncrType incr)
{
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
return seqN(f.derived(),(typename internal::cleanup_index_type<LastType>::type(l)-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr);
}
template<typename FirstType,typename LastTypeDerived, typename IncrType>
-typename internal::enable_if<!Symbolic::is_symbolic<FirstType>::value,
+typename internal::enable_if<!symbolic::is_symbolic<FirstType>::value,
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
- Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::ValueExpr<> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::QuotientExpr<symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,symbolic::ValueExpr<> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
typename internal::cleanup_seq_incr<IncrType>::type> >::type
-seq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
+seq(FirstType f, const symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
{
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),
@@ -304,26 +305,55 @@ seq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
template<typename FirstTypeDerived,typename LastTypeDerived, typename IncrType>
ArithmeticSequence<FirstTypeDerived,
- Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,
- Symbolic::NegateExpr<FirstTypeDerived> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
- Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::QuotientExpr<symbolic::AddExpr<symbolic::AddExpr<LastTypeDerived,
+ symbolic::NegateExpr<FirstTypeDerived> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
+ symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,
typename internal::cleanup_seq_incr<IncrType>::type>
-seq(const Symbolic::BaseExpr<FirstTypeDerived> &f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
+seq(const symbolic::BaseExpr<FirstTypeDerived> &f, const symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)
{
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
return seqN(f.derived(),(l.derived()-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr);
}
-#endif
+#endif // EIGEN_HAS_CXX11
#endif // EIGEN_PARSED_BY_DOXYGEN
+
+#if EIGEN_HAS_CXX11 || defined(EIGEN_PARSED_BY_DOXYGEN)
+/** \cpp11
+ * \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr.
+ *
+ * It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode
+ *
+ * \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */
+template<typename SizeType,typename IncrType>
+auto lastN(SizeType size, IncrType incr)
+-> decltype(seqN(Eigen::last-(size-fix<1>())*incr, size, incr))
+{
+ return seqN(Eigen::last-(size-fix<1>())*incr, size, incr);
+}
+
+/** \cpp11
+ * \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment.
+ *
+ * It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode
+ *
+ * \sa lastN(SizeType,IncrType), seqN(FirstType,SizeType), seq(FirstType,LastType) */
+template<typename SizeType>
+auto lastN(SizeType size)
+-> decltype(seqN(Eigen::last+fix<1>()-size, size))
+{
+ return seqN(Eigen::last+fix<1>()-size, size);
+}
+#endif
+
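A usage sketch of the new lastN overloads (C++11; the vector and sizes are illustrative):

#include <Eigen/Dense>

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(10, 0.0, 9.0);
  auto tail    = v(Eigen::lastN(3));     // last three elements: indices 7, 8, 9
  auto strided = v(Eigen::lastN(5, 2));  // five elements, step 2: indices 1, 3, 5, 7, 9
  return int(tail.size() + strided.size());
}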
namespace internal {
// Convert a symbolic span into a usable one (i.e., remove last/end "keywords")
template<typename T>
struct make_size_type {
- typedef typename internal::conditional<Symbolic::is_symbolic<T>::value, Index, T>::type type;
+ typedef typename internal::conditional<symbolic::is_symbolic<T>::value, Index, T>::type type;
};
template<typename FirstType,typename SizeType,typename IncrType,int XprSize>
@@ -345,6 +375,39 @@ struct get_compile_time_incr<ArithmeticSequence<FirstType,SizeType,IncrType> > {
} // end namespace internal
+/** \namespace Eigen::indexing
+ * \ingroup Core_Module
+ *
+ * The sole purpose of this namespace is to be able to import all functions
+ * and symbols that are expected to be used within operator() for indexing
+ * and slicing. If you already imported the whole Eigen namespace:
+ * \code using namespace Eigen; \endcode
+ * then you are already all set. Otherwise, if you do not want to, or cannot, import
+ * the whole Eigen namespace, the following line:
+ * \code using namespace Eigen::indexing; \endcode
+ * is equivalent to:
+ * \code
+ using Eigen::all;
+ using Eigen::seq;
+ using Eigen::seqN;
+ using Eigen::lastN; // c++11 only
+ using Eigen::last;
+ using Eigen::lastp1;
+ using Eigen::fix;
+ \endcode
+ */
+namespace indexing {
+ using Eigen::all;
+ using Eigen::seq;
+ using Eigen::seqN;
+ #if EIGEN_HAS_CXX11
+ using Eigen::lastN;
+ #endif
+ using Eigen::last;
+ using Eigen::lastp1;
+ using Eigen::fix;
+}
+
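A sketch of the import the documentation above describes, without pulling in the whole Eigen namespace:

#include <Eigen/Dense>

int compute() {
  using namespace Eigen::indexing;        // brings in seq, seqN, last, fix, ...
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(4, 4);
  auto lowerRight = M(seq(1, last), seqN(2, 2));  // rows 1..3, columns 2..3
  return int(lowerRight.rows());
}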
} // end namespace Eigen
#endif // EIGEN_ARITHMETIC_SEQUENCE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Array.h b/examples/ThirdPartyLibs/Eigen/src/Core/Array.h
index e10020d4f..20c789b10 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Array.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Array.h
@@ -117,7 +117,7 @@ class Array
{
return Base::_set(other);
}
-
+
/** Default constructor.
*
* For fixed-size matrices, does nothing.
@@ -153,17 +153,54 @@ class Array
: Base(std::move(other))
{
Base::_check_template_params();
- if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
- Base::_set_noalias(other);
}
EIGEN_DEVICE_FUNC
Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
{
- other.swap(*this);
+ Base::operator=(std::move(other));
return *this;
}
#endif
+ #if EIGEN_HAS_CXX11
+ /** \copydoc PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ *
+ * Example: \include Array_variadic_ctor_cxx11.cpp
+ * Output: \verbinclude Array_variadic_ctor_cxx11.out
+ *
+ * \sa Array(const std::initializer_list<std::initializer_list<Scalar>>&)
+ * \sa Array(const Scalar&), Array(const Scalar&,const Scalar&)
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ : Base(a0, a1, a2, a3, args...) {}
+
+ /** \brief Constructs an array and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11
+ *
+ * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
+ *
+ * Example: \include Array_initializer_list_23_cxx11.cpp
+ * Output: \verbinclude Array_initializer_list_23_cxx11.out
+ *
+ * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered.
+ *
+ * In the case of a compile-time column 1D array, implicit transposition from a single row is allowed.
+ * Therefore <code> Array<int,Dynamic,1>{{1,2,3,4,5}}</code> is legal and the more verbose syntax
+ * <code>Array<int,Dynamic,1>{{1},{2},{3},{4},{5}}</code> can be avoided:
+ *
+ * Example: \include Array_initializer_list_vector_cxx11.cpp
+ * Output: \verbinclude Array_initializer_list_vector_cxx11.out
+ *
+ * In the case of fixed-sized arrays, the initializer list sizes must exactly match the array sizes,
+ * and implicit transposition is allowed for compile-time 1D arrays only.
+ *
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Array(const std::initializer_list<std::initializer_list<Scalar>>& list) : Base(list) {}
+ #endif // end EIGEN_HAS_CXX11
+
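A sketch of the initializer-list constructors documented above (C++11; values illustrative):

#include <Eigen/Dense>

int main() {
  Eigen::Array<int, 2, 3> a {{1, 2, 3},
                             {4, 5, 6}};                    // rows given one by one
  Eigen::Array<int, Eigen::Dynamic, 1> v {{1, 2, 3, 4, 5}}; // implicit transposition
  return a(1, 2) + v(4);
}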
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename T>
EIGEN_DEVICE_FUNC
@@ -180,6 +217,7 @@ class Array
Base::_check_template_params();
this->template _init2<T0,T1>(val0, val1);
}
+
#else
/** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */
EIGEN_DEVICE_FUNC explicit Array(const Scalar *data);
@@ -191,7 +229,8 @@ class Array
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE explicit Array(Index dim);
- /** constructs an initialized 1x1 Array with the given coefficient */
+ /** constructs an initialized 1x1 Array with the given coefficient
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */
Array(const Scalar& value);
/** constructs an uninitialized array with \a rows rows and \a cols columns.
*
@@ -199,11 +238,14 @@ class Array
* it is redundant to pass these parameters, so one should use the default constructor
* Array() instead. */
Array(Index rows, Index cols);
- /** constructs an initialized 2D vector with given coefficients */
+ /** constructs an initialized 2D vector with given coefficients
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */
Array(const Scalar& val0, const Scalar& val1);
- #endif
+ #endif // end EIGEN_PARSED_BY_DOXYGEN
- /** constructs an initialized 3D vector with given coefficients */
+ /** constructs an initialized 3D vector with given coefficients
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2)
{
@@ -213,7 +255,9 @@ class Array
m_storage.data()[1] = val1;
m_storage.data()[2] = val2;
}
- /** constructs an initialized 4D vector with given coefficients */
+ /** constructs an initialized 4D vector with given coefficients
+ * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3)
{
@@ -244,8 +288,10 @@ class Array
: Base(other.derived())
{ }
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return 1; }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); }
#ifdef EIGEN_ARRAY_PLUGIN
#include EIGEN_ARRAY_PLUGIN
@@ -260,7 +306,7 @@ class Array
/** \defgroup arraytypedefs Global array typedefs
* \ingroup Core_Module
*
- * Eigen defines several typedef shortcuts for most common 1D and 2D array types.
+ * %Eigen defines several typedef shortcuts for most common 1D and 2D array types.
*
* The general patterns are the following:
*
@@ -273,6 +319,12 @@ class Array
* There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is
* a fixed-size 1D array of 4 complex floats.
*
+ * With \cpp11, template aliases are also defined for common sizes.
+ * They follow the same pattern as above except that the scalar type suffix is replaced by a
+ * template parameter, i.e.:
+ * - `ArrayRowsCols<Type>` where `Rows` and `Cols` can be \c 2,\c 3,\c 4, or \c X for fixed or dynamic size.
+ * - `ArraySize<Type>` where `Size` can be \c 2,\c 3,\c 4 or \c X for fixed or dynamic size 1D arrays.
+ *
* \sa class Array
*/
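A sketch of the alias templates this comment refers to, as defined by the macros added further below:

#include <Eigen/Dense>

int main() {
  Eigen::Array33<float> a;        // Eigen::Array<float, 3, 3>
  Eigen::ArrayX<double> b(7);     // Eigen::Array<double, Eigen::Dynamic, 1>
  Eigen::Array4X<int>   c(4, 10); // 4 rows, dynamic number of columns
  a.setZero(); b.setZero(); c.setZero();
  return 0;
}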
@@ -305,8 +357,42 @@ EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS
+
+#if EIGEN_HAS_CXX11
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS(Size, SizeSuffix) \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##SizeSuffix##SizeSuffix = Array<Type, Size, Size>; \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##SizeSuffix = Array<Type, Size, 1>;
+
+#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Size) \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##Size##X = Array<Type, Size, Dynamic>; \
+/** \ingroup arraytypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Array##X##Size = Array<Type, Dynamic, Size>;
+
+EIGEN_MAKE_ARRAY_TYPEDEFS(2, 2)
+EIGEN_MAKE_ARRAY_TYPEDEFS(3, 3)
+EIGEN_MAKE_ARRAY_TYPEDEFS(4, 4)
+EIGEN_MAKE_ARRAY_TYPEDEFS(Dynamic, X)
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(2)
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(3)
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(4)
+
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS
-#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE
+#endif // EIGEN_HAS_CXX11
#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
using Eigen::Matrix##SizeSuffix##TypeSuffix; \
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ArrayBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/ArrayBase.h
index 9da960f08..ea3dd1c3b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ArrayBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ArrayBase.h
@@ -153,8 +153,8 @@ template<typename Derived> class ArrayBase
// inline void evalTo(Dest& dst) const { dst = matrix(); }
protected:
- EIGEN_DEVICE_FUNC
- ArrayBase() : Base() {}
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase)
private:
explicit ArrayBase(Index);
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ArrayWrapper.h b/examples/ThirdPartyLibs/Eigen/src/Core/ArrayWrapper.h
index 688aadd62..2e9555b53 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ArrayWrapper.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ArrayWrapper.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_ARRAYWRAPPER_H
#define EIGEN_ARRAYWRAPPER_H
-namespace Eigen {
+namespace Eigen {
/** \class ArrayWrapper
* \ingroup Core_Module
@@ -60,14 +60,14 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
EIGEN_DEVICE_FUNC
explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}
- EIGEN_DEVICE_FUNC
- inline Index rows() const { return m_expression.rows(); }
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return m_expression.cols(); }
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const { return m_expression.outerStride(); }
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const { return m_expression.innerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); }
EIGEN_DEVICE_FUNC
inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
@@ -90,9 +90,9 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& dst) const { dst = m_expression; }
- const typename internal::remove_all<NestedExpressionType>::type&
EIGEN_DEVICE_FUNC
- nestedExpression() const
+ const typename internal::remove_all<NestedExpressionType>::type&
+ nestedExpression() const
{
return m_expression;
}
@@ -158,14 +158,14 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
EIGEN_DEVICE_FUNC
explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}
- EIGEN_DEVICE_FUNC
- inline Index rows() const { return m_expression.rows(); }
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return m_expression.cols(); }
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const { return m_expression.outerStride(); }
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const { return m_expression.innerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); }
EIGEN_DEVICE_FUNC
inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
@@ -185,8 +185,8 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
}
EIGEN_DEVICE_FUNC
- const typename internal::remove_all<NestedExpressionType>::type&
- nestedExpression() const
+ const typename internal::remove_all<NestedExpressionType>::type&
+ nestedExpression() const
{
return m_expression;
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/AssignEvaluator.h b/examples/ThirdPartyLibs/Eigen/src/Core/AssignEvaluator.h
index dbe435d86..7d76f0c25 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/AssignEvaluator.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/AssignEvaluator.h
@@ -17,24 +17,24 @@ namespace Eigen {
// This implementation is based on Assign.h
namespace internal {
-
+
/***************************************************************************
* Part 1 : the logic deciding a strategy for traversal and unrolling *
***************************************************************************/
// copy_using_evaluator_traits is based on assign_traits
-template <typename DstEvaluator, typename SrcEvaluator, typename AssignFunc>
+template <typename DstEvaluator, typename SrcEvaluator, typename AssignFunc, int MaxPacketSize = -1>
struct copy_using_evaluator_traits
{
typedef typename DstEvaluator::XprType Dst;
typedef typename Dst::Scalar DstScalar;
-
+
enum {
DstFlags = DstEvaluator::Flags,
SrcFlags = SrcEvaluator::Flags
};
-
+
public:
enum {
DstAlignment = DstEvaluator::Alignment,
@@ -51,13 +51,15 @@ private:
InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
: int(DstFlags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
: int(Dst::MaxRowsAtCompileTime),
+ RestrictedInnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(InnerSize,MaxPacketSize),
+ RestrictedLinearSize = EIGEN_SIZE_MIN_PREFER_FIXED(Dst::SizeAtCompileTime,MaxPacketSize),
OuterStride = int(outer_stride_at_compile_time<Dst>::ret),
MaxSizeAtCompileTime = Dst::SizeAtCompileTime
};
// TODO distinguish between linear traversal and inner-traversals
- typedef typename find_best_packet<DstScalar,Dst::SizeAtCompileTime>::type LinearPacketType;
- typedef typename find_best_packet<DstScalar,InnerSize>::type InnerPacketType;
+ typedef typename find_best_packet<DstScalar,RestrictedLinearSize>::type LinearPacketType;
+ typedef typename find_best_packet<DstScalar,RestrictedInnerSize>::type InnerPacketType;
enum {
LinearPacketSize = unpacket_traits<LinearPacketType>::size,
@@ -97,7 +99,8 @@ private:
public:
enum {
- Traversal = int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize) ? int(LinearVectorizedTraversal)
+ Traversal = int(Dst::SizeAtCompileTime) == 0 ? int(AllAtOnceTraversal) // If compile-size is zero, traversing will fail at compile-time.
+ : (int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize)) ? int(LinearVectorizedTraversal)
: int(MayInnerVectorize) ? int(InnerVectorizedTraversal)
: int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
: int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
@@ -135,7 +138,7 @@ public:
? int(CompleteUnrolling)
: int(NoUnrolling) )
: int(Traversal) == int(LinearTraversal)
- ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling)
+ ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling)
: int(NoUnrolling) )
#if EIGEN_UNALIGNED_VECTORIZE
: int(Traversal) == int(SliceVectorizedTraversal)
@@ -172,6 +175,8 @@ public:
EIGEN_DEBUG_VAR(MaySliceVectorize)
std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost)
+ EIGEN_DEBUG_VAR(DstEvaluator::CoeffReadCost)
+ EIGEN_DEBUG_VAR(Dst::SizeAtCompileTime)
EIGEN_DEBUG_VAR(UnrollingLimit)
EIGEN_DEBUG_VAR(MayUnrollCompletely)
EIGEN_DEBUG_VAR(MayUnrollInner)
@@ -195,7 +200,7 @@ struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling
// FIXME: this is not very clean, perhaps this information should be provided by the kernel?
typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
typedef typename DstEvaluatorType::XprType DstXprType;
-
+
enum {
outer = Index / DstXprType::InnerSizeAtCompileTime,
inner = Index % DstXprType::InnerSizeAtCompileTime
@@ -261,7 +266,7 @@ struct copy_using_evaluator_innervec_CompleteUnrolling
typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
typedef typename DstEvaluatorType::XprType DstXprType;
typedef typename Kernel::PacketType PacketType;
-
+
enum {
outer = Index / DstXprType::InnerSizeAtCompileTime,
inner = Index % DstXprType::InnerSizeAtCompileTime,
@@ -313,6 +318,22 @@ template<typename Kernel,
struct dense_assignment_loop;
/************************
+***** Special Cases *****
+************************/
+
+// Zero-sized assignment is a no-op.
+template<typename Kernel, int Unrolling>
+struct dense_assignment_loop<Kernel, AllAtOnceTraversal, Unrolling>
+{
+ EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel& /*kernel*/)
+ {
+ typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
+ EIGEN_STATIC_ASSERT(int(DstXprType::SizeAtCompileTime) == 0,
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT)
+ }
+};
+
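A consequence of this special case, sketched at the user level: assignments between zero-sized expressions compile and do nothing.

#include <Eigen/Dense>

int main() {
  Eigen::Matrix<double, 0, 0> a, b;
  a = b;                        // AllAtOnceTraversal: a no-op, no loop is instantiated
  Eigen::MatrixXd c(0, 3), d(0, 3);
  c = 2.0 * d;                  // runtime-empty assignments work as well
  return 0;
}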
+/************************
*** Default traversal ***
************************/
@@ -426,10 +447,10 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, CompleteUnrollin
{
typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
typedef typename Kernel::PacketType PacketType;
-
+
enum { size = DstXprType::SizeAtCompileTime,
packetSize =unpacket_traits<PacketType>::size,
- alignedSize = (size/packetSize)*packetSize };
+ alignedSize = (int(size)/packetSize)*packetSize };
copy_using_evaluator_innervec_CompleteUnrolling<Kernel, 0, alignedSize>::run(kernel);
copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, alignedSize, size>::run(kernel);
@@ -530,7 +551,7 @@ struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>
const Scalar *dst_ptr = kernel.dstDataPtr();
if((!bool(dstIsAligned)) && (UIntPtr(dst_ptr) % sizeof(Scalar))>0)
{
- // the pointer is not aligend-on scalar, so alignment is not possible
+ // the pointer is not aligned-on scalar, so alignment is not possible
return dense_assignment_loop<Kernel,DefaultTraversal,NoUnrolling>::run(kernel);
}
const Index packetAlignedMask = packetSize - 1;
@@ -568,14 +589,15 @@ struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, InnerUnrolling>
typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
typedef typename Kernel::PacketType PacketType;
- enum { size = DstXprType::InnerSizeAtCompileTime,
+ enum { innerSize = DstXprType::InnerSizeAtCompileTime,
packetSize =unpacket_traits<PacketType>::size,
- vectorizableSize = (size/packetSize)*packetSize };
+ vectorizableSize = (int(innerSize) / int(packetSize)) * int(packetSize),
+ size = DstXprType::SizeAtCompileTime };
for(Index outer = 0; outer < kernel.outerSize(); ++outer)
{
copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, vectorizableSize, 0, 0>::run(kernel, outer);
- copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, vectorizableSize, size>::run(kernel, outer);
+ copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, vectorizableSize, innerSize>::run(kernel, outer);
}
}
};
@@ -599,73 +621,74 @@ protected:
typedef typename DstEvaluatorTypeT::XprType DstXprType;
typedef typename SrcEvaluatorTypeT::XprType SrcXprType;
public:
-
+
typedef DstEvaluatorTypeT DstEvaluatorType;
typedef SrcEvaluatorTypeT SrcEvaluatorType;
typedef typename DstEvaluatorType::Scalar Scalar;
typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;
typedef typename AssignmentTraits::PacketType PacketType;
-
-
- EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
: m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr)
{
#ifdef EIGEN_DEBUG_ASSIGN
AssignmentTraits::debug();
#endif
}
-
- EIGEN_DEVICE_FUNC Index size() const { return m_dstExpr.size(); }
- EIGEN_DEVICE_FUNC Index innerSize() const { return m_dstExpr.innerSize(); }
- EIGEN_DEVICE_FUNC Index outerSize() const { return m_dstExpr.outerSize(); }
- EIGEN_DEVICE_FUNC Index rows() const { return m_dstExpr.rows(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_dstExpr.cols(); }
- EIGEN_DEVICE_FUNC Index outerStride() const { return m_dstExpr.outerStride(); }
-
- EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() { return m_dst; }
- EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const { return m_src; }
-
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_dstExpr.size(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const EIGEN_NOEXCEPT { return m_dstExpr.innerSize(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const EIGEN_NOEXCEPT { return m_dstExpr.outerSize(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_dstExpr.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_dstExpr.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return m_dstExpr.outerStride(); }
+
+ EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() EIGEN_NOEXCEPT { return m_dst; }
+ EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const EIGEN_NOEXCEPT { return m_src; }
+
/// Assign src(row,col) to dst(row,col) through the assignment functor.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col)
{
m_functor.assignCoeff(m_dst.coeffRef(row,col), m_src.coeff(row,col));
}
-
+
/// \sa assignCoeff(Index,Index)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index)
{
m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index));
}
-
+
/// \sa assignCoeff(Index,Index)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner)
{
- Index row = rowIndexByOuterInner(outer, inner);
- Index col = colIndexByOuterInner(outer, inner);
+ Index row = rowIndexByOuterInner(outer, inner);
+ Index col = colIndexByOuterInner(outer, inner);
assignCoeff(row, col);
}
-
-
+
+
template<int StoreMode, int LoadMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col)
{
m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(row,col), m_src.template packet<LoadMode,PacketType>(row,col));
}
-
+
template<int StoreMode, int LoadMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index)
{
m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(index), m_src.template packet<LoadMode,PacketType>(index));
}
-
+
template<int StoreMode, int LoadMode, typename PacketType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner)
{
- Index row = rowIndexByOuterInner(outer, inner);
+ Index row = rowIndexByOuterInner(outer, inner);
Index col = colIndexByOuterInner(outer, inner);
assignPacket<StoreMode,LoadMode,PacketType>(row, col);
}
-
+
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner)
{
typedef typename DstEvaluatorType::ExpressionTraits Traits;
@@ -688,7 +711,7 @@ public:
{
return m_dstExpr.data();
}
-
+
protected:
DstEvaluatorType& m_dst;
const SrcEvaluatorType& m_src;
@@ -697,6 +720,27 @@ protected:
DstXprType& m_dstExpr;
};
+// Special kernel used when computing small products whose operands have dynamic dimensions. It ensures that the
+// PacketSize used is no larger than 4, thereby increasing the chance that vectorized instructions will be used
+// when computing the product.
+
+template<typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor>
+class restricted_packet_dense_assignment_kernel : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, BuiltIn>
+{
+protected:
+ typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, BuiltIn> Base;
+ public:
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::DstXprType DstXprType;
+ typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, 4> AssignmentTraits;
+ typedef typename AssignmentTraits::PacketType PacketType;
+
+ EIGEN_DEVICE_FUNC restricted_packet_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)
+ : Base(dst, src, func, dstExpr)
+ {
+ }
+ };
+
/***************************************************************************
* Part 5 : Entry point for dense rectangular assignment
***************************************************************************/
@@ -734,13 +778,23 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType
resize_if_allowed(dst, src, func);
DstEvaluatorType dstEvaluator(dst);
-
+
typedef generic_dense_assignment_kernel<DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
dense_assignment_loop<Kernel>::run(kernel);
}
+// Specialization for filling the destination with a constant value.
+#ifndef EIGEN_GPU_COMPILE_PHASE
+template<typename DstXprType>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const Eigen::CwiseNullaryOp<Eigen::internal::scalar_constant_op<typename DstXprType::Scalar>, DstXprType>& src, const internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>& func)
+{
+ resize_if_allowed(dst, src, func);
+ std::fill_n(dst.data(), dst.size(), src.functor()());
+}
+#endif
+
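Expressions that would be routed through this constant-fill fast path on host builds, sketched below (values illustrative):

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m(3, 3);
  m = Eigen::MatrixXd::Constant(3, 3, 1.5); // dispatches to std::fill_n on m.data()
  m.setConstant(2.0);                       // same specialization underneath
  return 0;
}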
template<typename DstXprType, typename SrcXprType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src)
{
@@ -756,13 +810,13 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType
// AssignmentKind must define a Kind typedef.
template<typename DstShape, typename SrcShape> struct AssignmentKind;
-// Assignement kind defined in this file:
+// Assignment kind defined in this file:
struct Dense2Dense {};
struct EigenBase2EigenBase {};
template<typename,typename> struct AssignmentKind { typedef EigenBase2EigenBase Kind; };
template<> struct AssignmentKind<DenseShape,DenseShape> { typedef Dense2Dense Kind; };
-
+
// This is the main assignment class
template< typename DstXprType, typename SrcXprType, typename Functor,
typename Kind = typename AssignmentKind< typename evaluator_traits<DstXprType>::Shape , typename evaluator_traits<SrcXprType>::Shape >::Kind,
@@ -787,7 +841,7 @@ void call_assignment(const Dst& dst, const Src& src)
{
call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
}
-
+
// Deal with "assume-aliasing"
template<typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -827,14 +881,35 @@ void call_assignment_no_alias(Dst& dst, const Src& src, const Func& func)
typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst>::type ActualDstTypeCleaned;
typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst&>::type ActualDstType;
ActualDstType actualDst(dst);
-
+
// TODO check whether this is the right place to perform these checks:
EIGEN_STATIC_ASSERT_LVALUE(Dst)
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src)
EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);
-
+
Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);
}
+
+template<typename Dst, typename Src, typename Func>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+void call_restricted_packet_assignment_no_alias(Dst& dst, const Src& src, const Func& func)
+{
+ typedef evaluator<Dst> DstEvaluatorType;
+ typedef evaluator<Src> SrcEvaluatorType;
+ typedef restricted_packet_dense_assignment_kernel<DstEvaluatorType,SrcEvaluatorType,Func> Kernel;
+
+ EIGEN_STATIC_ASSERT_LVALUE(Dst)
+ EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar);
+
+ SrcEvaluatorType srcEvaluator(src);
+ resize_if_allowed(dst, src, func);
+
+ DstEvaluatorType dstEvaluator(dst);
+ Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
+
+ dense_assignment_loop<Kernel>::run(kernel);
+}
+
template<typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment_no_alias(Dst& dst, const Src& src)
@@ -875,7 +950,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Weak>
#ifndef EIGEN_NO_DEBUG
internal::check_for_aliasing(dst, src);
#endif
-
+
call_dense_assignment_loop(dst, src, func);
}
};
@@ -899,7 +974,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>
src.evalTo(dst);
}
- // NOTE The following two functions are templated to avoid their instanciation if not needed
+ // NOTE The following two functions are templated to avoid their instantiation if not needed
// This is needed because some expressions support evalTo only and/or have 'void' as scalar type.
template<typename SrcScalarType>
EIGEN_DEVICE_FUNC
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Assign_MKL.h b/examples/ThirdPartyLibs/Eigen/src/Core/Assign_MKL.h
index 6c2ab9264..c6140d185 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Assign_MKL.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Assign_MKL.h
@@ -68,27 +68,28 @@ class vml_assign_traits
#define EIGEN_PP_EXPAND(ARG) ARG
#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
-#define EIGEN_VMLMODE_EXPAND_LA , VML_HA
+#define EIGEN_VMLMODE_EXPAND_xLA , VML_HA
#else
-#define EIGEN_VMLMODE_EXPAND_LA , VML_LA
+#define EIGEN_VMLMODE_EXPAND_xLA , VML_LA
#endif
-#define EIGEN_VMLMODE_EXPAND__
+#define EIGEN_VMLMODE_EXPAND_x_
-#define EIGEN_VMLMODE_PREFIX_LA vm
-#define EIGEN_VMLMODE_PREFIX__ v
-#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_,VMLMODE)
+#define EIGEN_VMLMODE_PREFIX_xLA vm
+#define EIGEN_VMLMODE_PREFIX_x_ v
+#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x,VMLMODE)
#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \
template< typename DstXprType, typename SrcXprNested> \
struct Assignment<DstXprType, CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested>, assign_op<EIGENTYPE,EIGENTYPE>, \
Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> { \
typedef CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested> SrcXprType; \
- static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) { \
+ static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &func) { \
+ resize_if_allowed(dst, src, func); \
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) { \
VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(), \
- (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) ); \
+ (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \
} else { \
const Index outerSize = dst.outerSize(); \
for(Index outer = 0; outer < outerSize; ++outer) { \
@@ -96,7 +97,7 @@ class vml_assign_traits
&(src.nestedExpression().coeffRef(0, outer)); \
EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \
VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, \
- (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE)); \
+ (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
} \
} \
} \
@@ -144,13 +145,14 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> { \
typedef CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE,EIGENTYPE>, SrcXprNested, \
const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>,Plain> > SrcXprType; \
- static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) { \
+ static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &func) { \
+ resize_if_allowed(dst, src, func); \
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
VMLTYPE exponent = reinterpret_cast<const VMLTYPE&>(src.rhs().functor().m_other); \
if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) \
{ \
VMLOP( dst.size(), (const VMLTYPE*)src.lhs().data(), exponent, \
- (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) ); \
+ (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \
} else { \
const Index outerSize = dst.outerSize(); \
for(Index outer = 0; outer < outerSize; ++outer) { \
@@ -158,7 +160,7 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
&(src.lhs().coeffRef(0, outer)); \
EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \
VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent, \
- (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE)); \
+ (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
} \
} \
} \
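The extra `x` in these macro names is presumably there to avoid reserved identifiers: with the old names, pasting the `_` value of VMLMODE produced tokens such as EIGEN_VMLMODE_EXPAND__, and any identifier containing a double underscore is reserved in C++. A condensed illustration with shortened, hypothetical names:

    #define CAT2(a, b) a ## b
    #define CAT(a, b) CAT2(a, b)
    #define PREFIX_x_  v    /* VMLMODE == `_`  : plain "v..." VML entry points    */
    #define PREFIX_xLA vm   /* VMLMODE == `LA` : "vm..." entry points with a mode */
    #define PREFIX(VMLMODE) CAT(PREFIX_x, VMLMODE)
    /* PREFIX(LA) -> PREFIX_xLA -> vm, and PREFIX(_) -> PREFIX_x_ -> v.     */
    /* Without the `x`, PREFIX(_) would route through PREFIX__, a reserved  */
    /* double-underscore identifier.                                        */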
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/BandMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/BandMatrix.h
index 4978c9140..878c0240a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/BandMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/BandMatrix.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_BANDMATRIX_H
#define EIGEN_BANDMATRIX_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -45,7 +45,7 @@ class BandMatrixBase : public EigenBase<Derived>
};
public:
-
+
using Base::derived;
using Base::rows;
using Base::cols;
@@ -55,10 +55,10 @@ class BandMatrixBase : public EigenBase<Derived>
/** \returns the number of sub diagonals */
inline Index subs() const { return derived().subs(); }
-
+
/** \returns an expression of the underlying coefficient matrix */
inline const CoefficientsType& coeffs() const { return derived().coeffs(); }
-
+
/** \returns an expression of the underlying coefficient matrix */
inline CoefficientsType& coeffs() { return derived().coeffs(); }
@@ -67,7 +67,7 @@ class BandMatrixBase : public EigenBase<Derived>
* \warning the internal storage must be column major. */
inline Block<CoefficientsType,Dynamic,1> col(Index i)
{
- EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
Index start = 0;
Index len = coeffs().rows();
if (i<=supers())
@@ -90,7 +90,7 @@ class BandMatrixBase : public EigenBase<Derived>
template<int Index> struct DiagonalIntReturnType {
enum {
- ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)),
+ ReturnOpposite = (int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)),
Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,
ActualIndex = ReturnOpposite ? -Index : Index,
DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic)
@@ -130,7 +130,7 @@ class BandMatrixBase : public EigenBase<Derived>
eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
return Block<const CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
}
-
+
template<typename Dest> inline void evalTo(Dest& dst) const
{
dst.resize(rows(),cols());
@@ -192,7 +192,7 @@ struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >
Options = _Options,
DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic
};
- typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType;
+ typedef Matrix<Scalar, DataRowsAtCompileTime, ColsAtCompileTime, int(Options) & int(RowMajor) ? RowMajor : ColMajor> CoefficientsType;
};
template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options>
@@ -211,16 +211,16 @@ class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Sub
}
/** \returns the number of rows */
- inline Index rows() const { return m_rows.value(); }
+ inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); }
/** \returns the number of columns */
- inline Index cols() const { return m_coeffs.cols(); }
+ inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); }
/** \returns the number of super diagonals */
- inline Index supers() const { return m_supers.value(); }
+ inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); }
/** \returns the number of sub diagonals */
- inline Index subs() const { return m_subs.value(); }
+ inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); }
inline const CoefficientsType& coeffs() const { return m_coeffs; }
inline CoefficientsType& coeffs() { return m_coeffs; }
@@ -275,16 +275,16 @@ class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsT
}
/** \returns the number of rows */
- inline Index rows() const { return m_rows.value(); }
+ inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); }
/** \returns the number of columns */
- inline Index cols() const { return m_coeffs.cols(); }
+ inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); }
/** \returns the number of super diagonals */
- inline Index supers() const { return m_supers.value(); }
+ inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); }
/** \returns the number of sub diagonals */
- inline Index subs() const { return m_subs.value(); }
+ inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); }
inline const CoefficientsType& coeffs() const { return m_coeffs; }
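The recurring int(...) casts in this file silence a real diagnostic: bitwise or arithmetic operations mixing two different unscoped enumeration types are deprecated in C++20 (clang and gcc warn via -Wdeprecated-enum-enum-conversion). A reduced illustration with hypothetical enum names:

    enum StorageOptions { RowMajorFlag = 0x1 };
    enum MatrixFlags    { SelfAdjointFlag = 0x10 };

    // RowMajorFlag & SelfAdjointFlag;                   // deprecated in C++20
    int ok = int(RowMajorFlag) & int(SelfAdjointFlag);   // fine in every standard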
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Block.h b/examples/ThirdPartyLibs/Eigen/src/Core/Block.h
index 11de45c2e..d0b95d50b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Block.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Block.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_BLOCK_H
#define EIGEN_BLOCK_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
@@ -52,7 +52,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
Flags = (traits<XprType>::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit,
// FIXME DirectAccessBit should not be handled by expressions
- //
+ //
// Alignment is needed by MapBase's assertions
// We can safely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator
Alignment = 0
@@ -61,7 +61,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense;
-
+
} // end namespace internal
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl;
@@ -109,13 +109,13 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
typedef Impl Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
-
+
typedef typename internal::remove_all<XprType>::type NestedExpression;
-
+
/** Column or Row constructor
*/
- EIGEN_DEVICE_FUNC
- inline Block(XprType& xpr, Index i) : Impl(xpr,i)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Block(XprType& xpr, Index i) : Impl(xpr,i)
{
eigen_assert( (i>=0) && (
((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
@@ -124,8 +124,8 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
/** Fixed-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline Block(XprType& xpr, Index startRow, Index startCol)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Block(XprType& xpr, Index startRow, Index startCol)
: Impl(xpr, startRow, startCol)
{
EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
@@ -135,8 +135,8 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
/** Dynamic-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline Block(XprType& xpr,
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Block(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: Impl(xpr, startRow, startCol, blockRows, blockCols)
@@ -147,7 +147,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class
&& startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols);
}
};
-
+
// The generic default implementation for dense blocks simply forwards to the internal::BlockImpl_dense
// that must be specialized for direct and non-direct access...
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
@@ -159,10 +159,10 @@ class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
public:
typedef Impl Base;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
- EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
- EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {}
EIGEN_DEVICE_FUNC
- inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Impl(xpr, startRow, startCol, blockRows, blockCols) {}
};
@@ -260,19 +260,19 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
template<int LoadMode>
- inline PacketScalar packet(Index rowId, Index colId) const
+ EIGEN_DEVICE_FUNC inline PacketScalar packet(Index rowId, Index colId) const
{
return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value());
}
template<int LoadMode>
- inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
+ EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val);
}
template<int LoadMode>
- inline PacketScalar packet(Index index) const
+ EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const
{
return m_xpr.template packet<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -280,7 +280,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
template<int LoadMode>
- inline void writePacket(Index index, const PacketScalar& val)
+ EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val)
{
m_xpr.template writePacket<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -294,25 +294,25 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
EIGEN_DEVICE_FUNC inline Index outerStride() const;
#endif
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
- {
- return m_xpr;
+ {
+ return m_xpr;
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
XprType& nestedExpression() { return m_xpr; }
-
- EIGEN_DEVICE_FUNC
- StorageIndex startRow() const
- {
- return m_startRow.value();
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ StorageIndex startRow() const EIGEN_NOEXCEPT
+ {
+ return m_startRow.value();
}
-
- EIGEN_DEVICE_FUNC
- StorageIndex startCol() const
- {
- return m_startCol.value();
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ StorageIndex startCol() const EIGEN_NOEXCEPT
+ {
+ return m_startCol.value();
}
protected:
@@ -342,9 +342,9 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
/** Column or Row constructor
*/
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr, Index i)
- : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor))
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr, Index i)
+ : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor))
|| ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()),
BlockRows==1 ? 1 : xpr.rows(),
BlockCols==1 ? 1 : xpr.cols()),
@@ -357,8 +357,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
/** Fixed-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)),
m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)
{
@@ -367,8 +367,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
/** Dynamic-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr,
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols),
@@ -377,18 +377,18 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
init();
}
- EIGEN_DEVICE_FUNC
- const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
- {
- return m_xpr;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const EIGEN_NOEXCEPT
+ {
+ return m_xpr;
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
XprType& nestedExpression() { return m_xpr; }
-
+
/** \sa MapBase::innerStride() */
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index innerStride() const EIGEN_NOEXCEPT
{
return internal::traits<BlockType>::HasSameStorageOrderAsXprType
? m_xpr.innerStride()
@@ -396,23 +396,19 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
}
/** \sa MapBase::outerStride() */
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index outerStride() const EIGEN_NOEXCEPT
{
- return m_outerStride;
+ return internal::traits<BlockType>::HasSameStorageOrderAsXprType
+ ? m_xpr.outerStride()
+ : m_xpr.innerStride();
}
- EIGEN_DEVICE_FUNC
- StorageIndex startRow() const
- {
- return m_startRow.value();
- }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ StorageIndex startRow() const EIGEN_NOEXCEPT { return m_startRow.value(); }
- EIGEN_DEVICE_FUNC
- StorageIndex startCol() const
- {
- return m_startCol.value();
- }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ StorageIndex startCol() const EIGEN_NOEXCEPT { return m_startCol.value(); }
#ifndef __SUNPRO_CC
// FIXME sunstudio is not friendly with the above friend...
@@ -422,8 +418,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal used by allowAligned() */
- EIGEN_DEVICE_FUNC
- inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
: Base(data, blockRows, blockCols), m_xpr(xpr)
{
init();
@@ -431,7 +427,7 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
#endif
protected:
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void init()
{
m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType
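Taken together, the Block.h changes force-inline the constructors, make the packet accessors device-callable, and let strides of fixed-size blocks fold to compile-time constants. A small sketch (variable names are ours):

    Eigen::Matrix4f m;                  // column-major, fixed 4x4
    auto b = m.block<2, 2>(1, 1);       // fixed-size block
    // With innerStride()/outerStride() marked EIGEN_CONSTEXPR, these reduce
    // to the constants 1 and 4 instead of loading a cached member:
    Eigen::Index is = b.innerStride();  // == 1
    Eigen::Index os = b.outerStride();  // == 4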
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/BooleanRedux.h b/examples/ThirdPartyLibs/Eigen/src/Core/BooleanRedux.h
index ccf519067..852de8b90 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/BooleanRedux.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/BooleanRedux.h
@@ -22,7 +22,7 @@ struct all_unroller
row = (UnrollCount-1) % Rows
};
- static inline bool run(const Derived &mat)
+ EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat)
{
return all_unroller<Derived, UnrollCount-1, Rows>::run(mat) && mat.coeff(row, col);
}
@@ -31,13 +31,13 @@ struct all_unroller
template<typename Derived, int Rows>
struct all_unroller<Derived, 0, Rows>
{
- static inline bool run(const Derived &/*mat*/) { return true; }
+ EIGEN_DEVICE_FUNC static inline bool run(const Derived &/*mat*/) { return true; }
};
template<typename Derived, int Rows>
struct all_unroller<Derived, Dynamic, Rows>
{
- static inline bool run(const Derived &) { return false; }
+ EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; }
};
template<typename Derived, int UnrollCount, int Rows>
@@ -48,7 +48,7 @@ struct any_unroller
row = (UnrollCount-1) % Rows
};
- static inline bool run(const Derived &mat)
+ EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat)
{
return any_unroller<Derived, UnrollCount-1, Rows>::run(mat) || mat.coeff(row, col);
}
@@ -57,13 +57,13 @@ struct any_unroller
template<typename Derived, int Rows>
struct any_unroller<Derived, 0, Rows>
{
- static inline bool run(const Derived & /*mat*/) { return false; }
+ EIGEN_DEVICE_FUNC static inline bool run(const Derived & /*mat*/) { return false; }
};
template<typename Derived, int Rows>
struct any_unroller<Derived, Dynamic, Rows>
{
- static inline bool run(const Derived &) { return false; }
+ EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; }
};
} // end namespace internal
@@ -81,7 +81,7 @@ EIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::all() const
typedef internal::evaluator<Derived> Evaluator;
enum {
unroll = SizeAtCompileTime != Dynamic
- && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+ && SizeAtCompileTime * (int(Evaluator::CoeffReadCost) + int(NumTraits<Scalar>::AddCost)) <= EIGEN_UNROLLING_LIMIT
};
Evaluator evaluator(derived());
if(unroll)
@@ -105,7 +105,7 @@ EIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::any() const
typedef internal::evaluator<Derived> Evaluator;
enum {
unroll = SizeAtCompileTime != Dynamic
- && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+ && SizeAtCompileTime * (int(Evaluator::CoeffReadCost) + int(NumTraits<Scalar>::AddCost)) <= EIGEN_UNROLLING_LIMIT
};
Evaluator evaluator(derived());
if(unroll)
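For reference, the unrolled path that these EIGEN_DEVICE_FUNC annotations and cost casts feed, in a small usage sketch:

    Eigen::Array4f a(1.f, 2.f, 3.f, 4.f);
    // SizeAtCompileTime == 4, so 4 * (CoeffReadCost + AddCost) is compared
    // against EIGEN_UNROLLING_LIMIT and both reductions fully unroll.
    bool all_positive = (a > 0.f).all();
    bool any_above_3  = (a > 3.f).any();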
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/CommaInitializer.h b/examples/ThirdPartyLibs/Eigen/src/Core/CommaInitializer.h
index 35fdbb819..c0e29c75c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/CommaInitializer.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/CommaInitializer.h
@@ -33,6 +33,8 @@ struct CommaInitializer
inline CommaInitializer(XprType& xpr, const Scalar& s)
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
{
+ eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0
+ && "Cannot comma-initialize a 0x0 matrix (operator<<)");
m_xpr.coeffRef(0,0) = s;
}
@@ -41,6 +43,8 @@ struct CommaInitializer
inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
: m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
{
+ eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols()
+ && "Cannot comma-initialize a 0x0 matrix (operator<<)");
m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}
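What the two new assertions catch, sketched below; note that eigen_assert compiles away in release builds (EIGEN_NO_DEBUG / NDEBUG):

    Eigen::MatrixXd empty;        // default-constructed: 0x0
    // empty << 1.0;              // now trips the new eigen_assert instead of
    //                            // writing through coeffRef(0,0) out of bounds
    Eigen::Matrix2d ok;
    ok << 1, 2,
          3, 4;                   // properly sized target: unaffected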
@@ -103,7 +107,7 @@ struct CommaInitializer
EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)
#endif
{
- finished();
+ finished();
}
/** \returns the built matrix once all its coefficients have been set.
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ConditionEstimator.h b/examples/ThirdPartyLibs/Eigen/src/Core/ConditionEstimator.h
index aa7efdc76..51a2e5f1b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ConditionEstimator.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ConditionEstimator.h
@@ -160,7 +160,7 @@ rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Deco
{
typedef typename Decomposition::RealScalar RealScalar;
eigen_assert(dec.rows() == dec.cols());
- if (dec.rows() == 0) return RealScalar(1);
+ if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
if (matrix_norm == RealScalar(0)) return RealScalar(0);
if (dec.rows() == 1) return RealScalar(1);
const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
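This is a behavioral change: the reciprocal condition number of an empty decomposition is now reported as infinite rather than 1. A sketch, assuming a decomposition whose rcond() forwards to this helper (PartialPivLU's does):

    Eigen::MatrixXd m(0, 0);
    Eigen::PartialPivLU<Eigen::MatrixXd> lu(m);
    double r = lu.rcond();   // NumTraits<double>::infinity() for the 0x0 case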
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/CoreEvaluators.h b/examples/ThirdPartyLibs/Eigen/src/Core/CoreEvaluators.h
index 15b361b38..0ff8c8deb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/CoreEvaluators.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/CoreEvaluators.h
@@ -14,7 +14,7 @@
#define EIGEN_COREEVALUATORS_H
namespace Eigen {
-
+
namespace internal {
// This class returns the evaluator kind from the expression storage kind.
@@ -63,8 +63,8 @@ template< typename T,
template< typename T,
typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind,
typename Scalar = typename T::Scalar> struct unary_evaluator;
-
-// evaluator_traits<T> contains traits for evaluator<T>
+
+// evaluator_traits<T> contains traits for evaluator<T>
template<typename T>
struct evaluator_traits_base
@@ -90,7 +90,8 @@ template<typename T>
struct evaluator : public unary_evaluator<T>
{
typedef unary_evaluator<T> Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const T& xpr) : Base(xpr) {}
};
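The EIGEN_STRONG_INLINE added throughout this file matters because an evaluator is constructed for every expression assignment; forcing inlining keeps that layer free. For orientation, Eigen's macro roughly amounts to the following (STRONG_INLINE is a stand-in name):

    #if defined(_MSC_VER) || defined(__INTEL_COMPILER)
    #  define STRONG_INLINE __forceinline   /* inlining enforced */
    #else
    #  define STRONG_INLINE inline          /* a hint elsewhere  */
    #endif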
@@ -99,7 +100,7 @@ template<typename T>
struct evaluator<const T>
: evaluator<T>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};
@@ -110,7 +111,7 @@ struct evaluator_base
{
// TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer/inner indices.
typedef traits<ExpressionType> ExpressionTraits;
-
+
enum {
Alignment = 0
};
@@ -134,19 +135,25 @@ private:
// this helper makes it possible to completely eliminate m_outerStride if it is known at compile time.
template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
public:
- EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
{
- EIGEN_ONLY_USED_FOR_DEBUG(outerStride);
+#ifndef EIGEN_INTERNAL_DEBUGGING
+ EIGEN_UNUSED_VARIABLE(outerStride);
+#endif
eigen_internal_assert(outerStride==OuterStride);
}
- EIGEN_DEVICE_FUNC Index outerStride() const { return OuterStride; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; }
const Scalar *data;
};
template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
public:
- EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
- EIGEN_DEVICE_FUNC Index outerStride() const { return m_outerStride; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const { return m_outerStride; }
const Scalar *data;
protected:
Index m_outerStride;
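The two specializations differ in storage, which is the point of the helper: with a compile-time outer stride the evaluator carries only the data pointer. A sketch of the resulting layout (assuming a float scalar):

    // Fixed stride: the only member is the pointer, and outerStride()
    // constant-folds to the template argument.
    static_assert(sizeof(Eigen::internal::plainobjectbase_evaluator_data<float, 4>)
                      == sizeof(float*),
                  "fixed-stride helper stores only the pointer");
    // Dynamic stride: the Index member is stored next to the pointer.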
@@ -165,7 +172,7 @@ struct evaluator<PlainObjectBase<Derived> >
IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
-
+
CoeffReadCost = NumTraits<Scalar>::ReadCost,
Flags = traits<Derived>::EvaluatorFlags,
Alignment = traits<Derived>::Alignment
@@ -177,13 +184,15 @@ struct evaluator<PlainObjectBase<Derived> >
: RowsAtCompileTime
};
- EIGEN_DEVICE_FUNC evaluator()
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator()
: m_d(0,OuterStrideAtCompileTime)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
- EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const PlainObjectType& m)
: m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
@@ -265,11 +274,13 @@ struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
: evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
-
- EIGEN_DEVICE_FUNC evaluator() {}
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
- : evaluator<PlainObjectBase<XprType> >(m)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& m)
+ : evaluator<PlainObjectBase<XprType> >(m)
{ }
};
@@ -279,10 +290,12 @@ struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
{
typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
- EIGEN_DEVICE_FUNC evaluator() {}
-
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
- : evaluator<PlainObjectBase<XprType> >(m)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& m)
+ : evaluator<PlainObjectBase<XprType> >(m)
{ }
};
@@ -293,14 +306,15 @@ struct unary_evaluator<Transpose<ArgType>, IndexBased>
: evaluator_base<Transpose<ArgType> >
{
typedef Transpose<ArgType> XprType;
-
+
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
Alignment = evaluator<ArgType>::Alignment
};
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -485,10 +499,10 @@ struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
{
typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
-
+
enum {
CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,
-
+
Flags = (evaluator<PlainObjectTypeCleaned>::Flags
& ( HereditaryBits
| (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
@@ -545,10 +559,10 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
: evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
{
typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
-
+
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
-
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
+
Flags = evaluator<ArgType>::Flags
& (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
Alignment = evaluator<ArgType>::Alignment
@@ -592,13 +606,13 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
protected:
// this helper makes it possible to completely eliminate the functor if it is empty
- class Data : private UnaryOp
+ struct Data
{
- public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Data(const XprType& xpr) : UnaryOp(xpr.functor()), argImpl(xpr.nestedExpression()) {}
+ Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const UnaryOp& func() const { return static_cast<const UnaryOp&>(*this); }
+ const UnaryOp& func() const { return op; }
+ UnaryOp op;
evaluator<ArgType> argImpl;
};
@@ -614,7 +628,7 @@ struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
-
+
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
@@ -623,10 +637,10 @@ struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased
: evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
-
+
enum {
- CoeffReadCost = evaluator<Arg1>::CoeffReadCost + evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost,
-
+ CoeffReadCost = int(evaluator<Arg1>::CoeffReadCost) + int(evaluator<Arg2>::CoeffReadCost) + int(evaluator<Arg3>::CoeffReadCost) + int(functor_traits<TernaryOp>::Cost),
+
Arg1Flags = evaluator<Arg1>::Flags,
Arg2Flags = evaluator<Arg2>::Flags,
Arg3Flags = evaluator<Arg3>::Flags,
@@ -686,12 +700,13 @@ struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased
protected:
// this helper makes it possible to completely eliminate the functor if it is empty
- struct Data : private TernaryOp
+ struct Data
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Data(const XprType& xpr) : TernaryOp(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
+ Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const TernaryOp& func() const { return static_cast<const TernaryOp&>(*this); }
+ const TernaryOp& func() const { return op; }
+ TernaryOp op;
evaluator<Arg1> arg1Impl;
evaluator<Arg2> arg2Impl;
evaluator<Arg3> arg3Impl;
@@ -709,8 +724,9 @@ struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
-
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
template<typename BinaryOp, typename Lhs, typename Rhs>
@@ -718,10 +734,10 @@ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBase
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
-
+
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
-
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+
LhsFlags = evaluator<Lhs>::Flags,
RhsFlags = evaluator<Rhs>::Flags,
SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
@@ -738,7 +754,8 @@ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBase
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
};
- EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
@@ -777,12 +794,13 @@ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBase
protected:
// this helper makes it possible to completely eliminate the functor if it is empty
- struct Data : private BinaryOp
+ struct Data
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Data(const XprType& xpr) : BinaryOp(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
+ Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const BinaryOp& func() const { return static_cast<const BinaryOp&>(*this); }
+ const BinaryOp& func() const { return op; }
+ BinaryOp op;
evaluator<Lhs> lhsImpl;
evaluator<Rhs> rhsImpl;
};
@@ -797,12 +815,12 @@ struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
: evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
{
typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
-
+
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
-
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
+
Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
-
+
Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
};
@@ -842,12 +860,13 @@ struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
protected:
// this helper makes it possible to completely eliminate the functor if it is empty
- struct Data : private UnaryOp
+ struct Data
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Data(const XprType& xpr) : UnaryOp(xpr.functor()), argImpl(xpr.nestedExpression()) {}
+ Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const UnaryOp& func() const { return static_cast<const UnaryOp&>(*this); }
+ const UnaryOp& func() const { return op; }
+ UnaryOp op;
evaluator<ArgType> argImpl;
};
@@ -868,14 +887,15 @@ struct mapbase_evaluator : evaluator_base<Derived>
typedef typename XprType::PointerType PointerType;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
-
+
enum {
IsRowMajor = XprType::RowsAtCompileTime,
ColsAtCompileTime = XprType::ColsAtCompileTime,
CoeffReadCost = NumTraits<Scalar>::ReadCost
};
- EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit mapbase_evaluator(const XprType& map)
: m_data(const_cast<PointerType>(map.data())),
m_innerStride(map.innerStride()),
m_outerStride(map.outerStride())
@@ -939,17 +959,21 @@ struct mapbase_evaluator : evaluator_base<Derived>
internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
}
protected:
- EIGEN_DEVICE_FUNC
- inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
- EIGEN_DEVICE_FUNC
- inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rowStride() const EIGEN_NOEXCEPT {
+ return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index colStride() const EIGEN_NOEXCEPT {
+ return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
+ }
PointerType m_data;
const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
};
-template<typename PlainObjectType, int MapOptions, typename StrideType>
+template<typename PlainObjectType, int MapOptions, typename StrideType>
struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
: public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
{
@@ -957,7 +981,7 @@ struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
typedef typename XprType::Scalar Scalar;
// TODO: should check for smaller packet types once we can handle multi-sized packet types
typedef typename packet_traits<Scalar>::type PacketScalar;
-
+
enum {
InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
? int(PlainObjectType::InnerStrideAtCompileTime)
@@ -969,34 +993,35 @@ struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
HasNoStride = HasNoInnerStride && HasNoOuterStride,
IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
-
+
PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
-
+
Alignment = int(MapOptions)&int(AlignedMask)
};
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
- : mapbase_evaluator<XprType, PlainObjectType>(map)
+ : mapbase_evaluator<XprType, PlainObjectType>(map)
{ }
};
// -------------------- Ref --------------------
-template<typename PlainObjectType, int RefOptions, typename StrideType>
+template<typename PlainObjectType, int RefOptions, typename StrideType>
struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
: public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
{
typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
-
+
enum {
Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
};
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
- : mapbase_evaluator<XprType, PlainObjectType>(ref)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& ref)
+ : mapbase_evaluator<XprType, PlainObjectType>(ref)
{ }
};
@@ -1004,8 +1029,8 @@ struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
-
-template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
: block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
{
@@ -1013,15 +1038,15 @@ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
typedef typename XprType::Scalar Scalar;
// TODO: should check for smaller packet types once we can handle multi-sized packet types
typedef typename packet_traits<Scalar>::type PacketScalar;
-
+
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
-
+
RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
-
+
ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
@@ -1034,21 +1059,24 @@ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
? int(outer_stride_at_compile_time<ArgType>::ret)
: int(inner_stride_at_compile_time<ArgType>::ret),
- MaskPacketAccessBit = (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0,
-
- FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
+ MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
+
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
FlagsRowMajorBit = XprType::Flags&RowMajorBit,
Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
DirectAccessBit |
MaskPacketAccessBit),
Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
-
+
PacketAlignment = unpacket_traits<PacketScalar>::alignment,
- Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
+ Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
+ && (OuterStrideAtCompileTime!=0)
+ && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
};
typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& block) : block_evaluator_type(block)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
@@ -1061,8 +1089,9 @@ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAcc
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
- EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
- : unary_evaluator<XprType>(block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit block_evaluator(const XprType& block)
+ : unary_evaluator<XprType>(block)
{}
};
@@ -1072,84 +1101,116 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
{
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
- : m_argImpl(block.nestedExpression()),
- m_startRow(block.startRow()),
- m_startCol(block.startCol())
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& block)
+ : m_argImpl(block.nestedExpression()),
+ m_startRow(block.startRow()),
+ m_startCol(block.startCol()),
+ m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
{ }
-
+
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
enum {
- RowsAtCompileTime = XprType::RowsAtCompileTime
+ RowsAtCompileTime = XprType::RowsAtCompileTime,
+ ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
};
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
- {
- return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
+ {
+ return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
- {
- return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ {
+ return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
- {
- return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
+ {
+ return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index index)
- {
- return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ {
+ return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
}
-
+
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
- PacketType packet(Index row, Index col) const
- {
- return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
+ PacketType packet(Index row, Index col) const
+ {
+ return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
}
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
- PacketType packet(Index index) const
- {
- return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
- RowsAtCompileTime == 1 ? index : 0);
+ PacketType packet(Index index) const
+ {
+ if (ForwardLinearAccess)
+ return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
+ else
+ return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0);
}
-
+
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
- void writePacket(Index row, Index col, const PacketType& x)
+ void writePacket(Index row, Index col, const PacketType& x)
{
- return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
}
-
+
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
- void writePacket(Index index, const PacketType& x)
+ void writePacket(Index index, const PacketType& x)
{
- return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
- RowsAtCompileTime == 1 ? index : 0,
- x);
+ if (ForwardLinearAccess)
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
+ else
+ return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0,
+ x);
}
-
+
protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const
+ {
+ return m_argImpl.coeff(m_linear_offset.value() + index);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const
+ {
+ return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */)
+ {
+ return m_argImpl.coeffRef(m_linear_offset.value() + index);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */)
+ {
+ return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ }
+
evaluator<ArgType> m_argImpl;
const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
+ const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
};
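What ForwardLinearAccess buys, sketched: when the block is an inner panel, or shares the argument's storage order while the argument supports linear access, coeff(index) becomes one read at a precomputed linear offset instead of being rebuilt from (row, col):

    Eigen::MatrixXf m = Eigen::MatrixXf::Random(8, 8);  // column-major
    auto c = m.col(3);     // inner-panel block with the same storage order
    // Per the evaluator above: m_linear_offset == 3 * m.rows(), and
    // c.coeff(i) forwards to m_argImpl.coeff(m_linear_offset + i).
    float x = c(2);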
-// TODO: This evaluator does not actually use the child evaluator;
+// TODO: This evaluator does not actually use the child evaluator;
// all action is via the data() as returned by the Block expression.
-template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
: mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
@@ -1157,8 +1218,9 @@ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAc
typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
typedef typename XprType::Scalar Scalar;
- EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
- : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit block_evaluator(const XprType& block)
+ : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
{
// TODO: for the 3.3 release, this should be turned into an internal assertion, but let's keep it as is for the beta lifetime
eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
@@ -1181,18 +1243,19 @@ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
evaluator<ElseMatrixType>::CoeffReadCost),
Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
-
+
Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
};
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& select)
: m_conditionImpl(select.conditionMatrix()),
m_thenImpl(select.thenMatrix()),
m_elseImpl(select.elseMatrix())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
-
+
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -1212,7 +1275,7 @@ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
else
return m_elseImpl.coeff(index);
}
-
+
protected:
evaluator<ConditionMatrixType> m_conditionImpl;
evaluator<ThenMatrixType> m_thenImpl;
@@ -1222,7 +1285,7 @@ protected:
// -------------------- Replicate --------------------
-template<typename ArgType, int RowFactor, int ColFactor>
+template<typename ArgType, int RowFactor, int ColFactor>
struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
: evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
{
@@ -1233,22 +1296,23 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
};
typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
-
+
enum {
CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
-
+
Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
};
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& replicate)
: m_arg(replicate.nestedExpression()),
m_argImpl(m_arg),
m_rows(replicate.nestedExpression().rows()),
m_cols(replicate.nestedExpression().cols())
{}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
@@ -1259,10 +1323,10 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col % m_cols.value();
-
+
return m_argImpl.coeff(actual_row, actual_col);
}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index index) const
{
@@ -1270,7 +1334,7 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
? (ColFactor==1 ? index : index%m_cols.value())
: (RowFactor==1 ? index : index%m_rows.value());
-
+
return m_argImpl.coeff(actual_index);
}
@@ -1287,7 +1351,7 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
}
-
+
template<int LoadMode, typename PacketType>
EIGEN_STRONG_INLINE
PacketType packet(Index index) const
@@ -1298,7 +1362,7 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
}
-
+
protected:
const ArgTypeNested m_arg;
evaluator<ArgTypeNestedCleaned> m_argImpl;
@@ -1306,64 +1370,6 @@ protected:
const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
};
-
-// -------------------- PartialReduxExpr --------------------
-
-template< typename ArgType, typename MemberOp, int Direction>
-struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
- : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
-{
- typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
- typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
- typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
- typedef typename ArgType::Scalar InputScalar;
- typedef typename XprType::Scalar Scalar;
- enum {
- TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
- };
- typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
- enum {
- CoeffReadCost = TraversalSize==Dynamic ? HugeCost
- : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),
-
- Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit,
-
- Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
- };
-
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)
- : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
- {
- EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value));
- EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
- }
-
- typedef typename XprType::CoeffReturnType CoeffReturnType;
-
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const Scalar coeff(Index i, Index j) const
- {
- if (Direction==Vertical)
- return m_functor(m_arg.col(j));
- else
- return m_functor(m_arg.row(i));
- }
-
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- const Scalar coeff(Index index) const
- {
- if (Direction==Vertical)
- return m_functor(m_arg.col(index));
- else
- return m_functor(m_arg.row(index));
- }
-
-protected:
- typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;
- const MemberOp m_functor;
-};
-
-
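This wholesale removal does not drop the feature: in upstream Eigen 3.4 the PartialReduxExpr evaluator moved to its own header (src/Core/PartialReduxEvaluator.h) with a vectorized implementation, and the vendored copy presumably follows suit. Partial reductions keep working as before:

    Eigen::MatrixXf m = Eigen::MatrixXf::Random(3, 5);
    Eigen::RowVectorXf colSums = m.colwise().sum();       // PartialReduxExpr, Vertical
    Eigen::VectorXf    rowMaxs = m.rowwise().maxCoeff();  // PartialReduxExpr, Horizontal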
// -------------------- MatrixWrapper and ArrayWrapper --------------------
//
// evaluator_wrapper_base<T> is a common base class for the
@@ -1380,7 +1386,8 @@ struct evaluator_wrapper_base
Alignment = evaluator<ArgType>::Alignment
};
- EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
typedef typename ArgType::Scalar Scalar;
typedef typename ArgType::CoeffReturnType CoeffReturnType;
@@ -1447,7 +1454,8 @@ struct unary_evaluator<MatrixWrapper<TArgType> >
{
typedef MatrixWrapper<TArgType> XprType;
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& wrapper)
: evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
{ }
};
@@ -1458,7 +1466,8 @@ struct unary_evaluator<ArrayWrapper<TArgType> >
{
typedef ArrayWrapper<TArgType> XprType;
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& wrapper)
: evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
{ }
};
@@ -1485,9 +1494,9 @@ struct unary_evaluator<Reverse<ArgType, Direction> >
ReversePacket = (Direction == BothDirections)
|| ((Direction == Vertical) && IsColMajor)
|| ((Direction == Horizontal) && IsRowMajor),
-
+
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
-
+
// let's enable LinearAccess only with vectorization because of the product overhead
// FIXME enable DirectAccess with negative strides?
Flags0 = evaluator<ArgType>::Flags,
@@ -1496,16 +1505,17 @@ struct unary_evaluator<Reverse<ArgType, Direction> >
? LinearAccessBit : 0,
Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
-
+
Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
};
- EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& reverse)
: m_argImpl(reverse.nestedExpression()),
m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
{ }
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
@@ -1580,7 +1590,7 @@ struct unary_evaluator<Reverse<ArgType, Direction> >
m_argImpl.template writePacket<LoadMode>
(m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
}
-
+
protected:
evaluator<ArgType> m_argImpl;
@@ -1598,20 +1608,21 @@ struct evaluator<Diagonal<ArgType, DiagIndex> >
: evaluator_base<Diagonal<ArgType, DiagIndex> >
{
typedef Diagonal<ArgType, DiagIndex> XprType;
-
+
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
-
+
Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
-
+
Alignment = 0
};
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& diagonal)
: m_argImpl(diagonal.nestedExpression()),
m_index(diagonal.index())
{ }
-
+
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -1644,8 +1655,10 @@ protected:
const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
private:
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
};
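// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the
// offsets above place the k-th diagonal, i.e. entry i of m.diagonal<k>() is
// m(i + rowOffset, i + colOffset).
#include <Eigen/Dense>
void diagonal_demo() {
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  Eigen::Vector2d sup = m.diagonal<1>();   // k=1:  rowOffset 0, colOffset 1 -> (2, 6)
  Eigen::Vector2d sub = m.diagonal<-1>();  // k=-1: rowOffset 1, colOffset 0 -> (4, 8)
  (void)sup; (void)sub;
}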
@@ -1669,25 +1682,25 @@ class EvalToTemp
: public dense_xpr_base<EvalToTemp<ArgType> >::type
{
public:
-
+
typedef typename dense_xpr_base<EvalToTemp>::type Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
-
+
explicit EvalToTemp(const ArgType& arg)
: m_arg(arg)
{ }
-
+
const ArgType& arg() const
{
return m_arg;
}
- Index rows() const
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
{
return m_arg.rows();
}
- Index cols() const
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
{
return m_arg.cols();
}
@@ -1695,7 +1708,7 @@ class EvalToTemp
private:
const ArgType& m_arg;
};
-
+
template<typename ArgType>
struct evaluator<EvalToTemp<ArgType> >
: public evaluator<typename ArgType::PlainObject>
@@ -1703,7 +1716,7 @@ struct evaluator<EvalToTemp<ArgType> >
typedef EvalToTemp<ArgType> XprType;
typedef typename ArgType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
-
+
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
: m_result(xpr.arg())
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseBinaryOp.h b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseBinaryOp.h
index bf2632d9e..2202b1cc6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseBinaryOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseBinaryOp.h
@@ -74,7 +74,7 @@ class CwiseBinaryOpImpl;
* \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
*/
template<typename BinaryOp, typename LhsType, typename RhsType>
-class CwiseBinaryOp :
+class CwiseBinaryOp :
public CwiseBinaryOpImpl<
BinaryOp, LhsType, RhsType,
typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,
@@ -83,7 +83,7 @@ class CwiseBinaryOp :
internal::no_assignment_operator
{
public:
-
+
typedef typename internal::remove_all<BinaryOp>::type Functor;
typedef typename internal::remove_all<LhsType>::type Lhs;
typedef typename internal::remove_all<RhsType>::type Rhs;
@@ -100,8 +100,14 @@ class CwiseBinaryOp :
typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
+#if EIGEN_COMP_MSVC && EIGEN_HAS_CXX11
+ // Required for Visual Studio, or the copy constructor will probably not get inlined!
+ EIGEN_STRONG_INLINE
+ CwiseBinaryOp(const CwiseBinaryOp<BinaryOp,LhsType,RhsType>&) = default;
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
: m_lhs(aLhs), m_rhs(aRhs), m_functor(func)
{
EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
@@ -110,31 +116,25 @@ class CwiseBinaryOp :
eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
}
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index rows() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT {
// return the fixed size type if available to enable compile time optimizations
- if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
- return m_rhs.rows();
- else
- return m_lhs.rows();
+ return internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic ? m_rhs.rows() : m_lhs.rows();
}
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index cols() const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT {
// return the fixed size type if available to enable compile time optimizations
- if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
- return m_rhs.cols();
- else
- return m_lhs.cols();
+ return internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic ? m_rhs.cols() : m_lhs.cols();
}
/** \returns the left hand side nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const _LhsNested& lhs() const { return m_lhs; }
/** \returns the right hand side nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const _RhsNested& rhs() const { return m_rhs; }
/** \returns the functor representing the binary operation */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const BinaryOp& functor() const { return m_functor; }
protected:
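// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the
// rows()/cols() rewrite above prefers the operand whose extent is known at
// compile time, so a fixed-size operand keeps the sum's extent a constant even
// when the other side is dynamic.
#include <Eigen/Dense>
void binary_extent_demo(const Eigen::Matrix3f& fixed, const Eigen::MatrixXf& dyn) {
  auto sum = fixed + dyn;  // CwiseBinaryOp; equal run-time sizes are asserted
  static_assert(decltype(sum)::RowsAtCompileTime == 3, "fixed extent is kept");
  (void)sum;
}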
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseNullaryOp.h b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseNullaryOp.h
index b1923da0f..289ec510a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseNullaryOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseNullaryOp.h
@@ -74,10 +74,10 @@ class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp
&& (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
}
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const { return m_rows.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const { return m_cols.value(); }
/** \returns the functor representing the nullary operation */
EIGEN_DEVICE_FUNC
@@ -105,7 +105,12 @@ class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp
*/
template<typename Derived>
template<typename CustomNullaryOp>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+const CwiseNullaryOp<CustomNullaryOp,typename DenseBase<Derived>::PlainObject>
+#else
+const CwiseNullaryOp<CustomNullaryOp,PlainObject>
+#endif
DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);
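// Illustrative sketch (assumes <Eigen/Dense> and C++11; not part of the patch):
// NullaryExpr builds an expression whose coefficients come from a functor;
// nothing is stored until the expression is assigned.
#include <Eigen/Dense>
void nullary_demo() {
  auto f = [](Eigen::Index i, Eigen::Index j) { return float(10 * i + j); };
  Eigen::MatrixXf m = Eigen::MatrixXf::NullaryExpr(3, 4, f);  // m(i,j) == 10*i + j
  (void)m;
}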
@@ -126,12 +131,17 @@ DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& f
*
* Here is an example with C++11 random generators: \include random_cpp11.cpp
* Output: \verbinclude random_cpp11.out
- *
+ *
* \sa class CwiseNullaryOp
*/
template<typename Derived>
template<typename CustomNullaryOp>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+#else
+const CwiseNullaryOp<CustomNullaryOp, PlainObject>
+#endif
DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -150,7 +160,12 @@ DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
*/
template<typename Derived>
template<typename CustomNullaryOp>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+#else
+const CwiseNullaryOp<CustomNullaryOp, PlainObject>
+#endif
DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);
@@ -217,27 +232,32 @@ DenseBase<Derived>::Constant(const Scalar& value)
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
*
- * \sa LinSpaced(Index,Scalar,Scalar), setLinSpaced(Index,const Scalar&,const Scalar&)
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced_seq_deprecated.cpp
+ * Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out
+ *
+ * \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&)
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low,high,size));
}
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
*
- * \sa LinSpaced(Scalar,Scalar)
+ * \sa LinSpaced(const Scalar&, const Scalar&)
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar>(low,high,Derived::SizeAtCompileTime));
}
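// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the
// Sequential_t overloads above survive only as deprecated aliases; new code
// calls LinSpaced directly.
#include <Eigen/Dense>
void linspaced_demo() {
  // 5 evenly spaced values over [0, 1]: 0, 0.25, 0.5, 0.75, 1
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
  (void)v;
}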
/**
@@ -268,7 +288,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomA
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low,high,size));
}
/**
@@ -281,7 +301,7 @@ DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar>(low,high,Derived::SizeAtCompileTime));
}
/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
@@ -363,6 +383,33 @@ PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& val)
return setConstant(val);
}
+/** Resizes to the given size, changing only the number of columns, and sets all
+ * coefficients in this expression to the given value \a val. For the parameter
+ * of type NoChange_t, just pass the special value \c NoChange.
+ *
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(NoChange_t, Index cols, const Scalar& val)
+{
+ return setConstant(rows(), cols, val);
+}
+
+/** Resizes to the given size, changing only the number of rows, and sets all
+ * coefficients in this expression to the given value \a val. For the parameter
+ * of type NoChange_t, just pass the special value \c NoChange.
+ *
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index rows, NoChange_t, const Scalar& val)
+{
+ return setConstant(rows, cols(), val);
+}
+
+
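// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the
// NoChange_t overloads added above resize one extent and leave the other as-is.
#include <Eigen/Dense>
void set_constant_demo() {
  Eigen::MatrixXd m(2, 3);
  m.setConstant(Eigen::NoChange, 5, 1.0);  // keep 2 rows, resize to 5 cols, fill with 1
  m.setConstant(4, Eigen::NoChange, 2.0);  // resize to 4 rows, keep 5 cols, fill with 2
}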
/**
* \brief Sets a linearly spaced vector.
*
@@ -383,7 +430,7 @@ template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize));
+ return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar>(low,high,newSize));
}
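// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch):
// setLinSpaced resizes the vector and fills it in place, avoiding a temporary.
#include <Eigen/Dense>
void set_linspaced_demo() {
  Eigen::VectorXf v;
  v.setLinSpaced(4, 0.f, 3.f);  // v == [0, 1, 2, 3]
}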
/**
@@ -536,6 +583,32 @@ PlainObjectBase<Derived>::setZero(Index rows, Index cols)
return setConstant(Scalar(0));
}
+/** Resizes to the given size, changing only the number of columns, and sets all
+ * coefficients in this expression to zero. For the parameter of type NoChange_t,
+ * just pass the special value \c NoChange.
+ *
+ * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Zero()
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(NoChange_t, Index cols)
+{
+ return setZero(rows(), cols);
+}
+
+/** Resizes to the given size, changing only the number of rows, and sets all
+ * coefficients in this expression to zero. For the parameter of type NoChange_t,
+ * just pass the special value \c NoChange.
+ *
+ * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Zero()
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index rows, NoChange_t)
+{
+ return setZero(rows, cols());
+}
+
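// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the same
// NoChange_t pattern for setZero, resizing a single extent before zeroing.
#include <Eigen/Dense>
void set_zero_demo() {
  Eigen::MatrixXi m(3, 3);
  m.setZero(Eigen::NoChange, 6);  // 3 rows kept, 6 cols, all zero
  m.setZero(2, Eigen::NoChange);  // 2 rows, 6 cols kept, all zero
}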
// ones:
/** \returns an expression of a matrix where all coefficients equal one.
@@ -662,6 +735,32 @@ PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
return setConstant(Scalar(1));
}
+/** Resizes to the given size, changing only the number of rows, and sets all
+ * coefficients in this expression to one. For the parameter of type NoChange_t,
+ * just pass the special value \c NoChange.
+ *
+ * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp, MatrixBase::Ones()
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index rows, NoChange_t)
+{
+ return setOnes(rows, cols());
+}
+
+/** Resizes to the given size, changing only the number of columns, and sets all
+ * coefficients in this expression to one. For the parameter of type NoChange_t,
+ * just pass the special value \c NoChange.
+ *
+ * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t), class CwiseNullaryOp, MatrixBase::Ones()
+ */
+template<typename Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(NoChange_t, Index cols)
+{
+ return setOnes(rows(), cols);
+}
+
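// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the
// matching setOnes overloads.
#include <Eigen/Dense>
void set_ones_demo() {
  Eigen::MatrixXf m(2, 2);
  m.setOnes(4, Eigen::NoChange);  // 4 rows, 2 cols kept, all one
  m.setOnes(Eigen::NoChange, 3);  // 4 rows kept, 3 cols, all one
}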
// Identity:
/** \returns an expression of the identity matrix (not necessarily square).
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryOp.h b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryOp.h
index 1d2dd19f2..e68c4f748 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryOp.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_CWISE_UNARY_OP_H
#define EIGEN_CWISE_UNARY_OP_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename UnaryOp, typename XprType>
@@ -24,7 +24,7 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
typedef typename XprType::Nested XprTypeNested;
typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
enum {
- Flags = _XprTypeNested::Flags & RowMajorBit
+ Flags = _XprTypeNested::Flags & RowMajorBit
};
};
}
@@ -65,10 +65,10 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal
explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
: m_xpr(xpr), m_functor(func) {}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Index rows() const { return m_xpr.rows(); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- Index cols() const { return m_xpr.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_xpr.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_xpr.cols(); }
/** \returns the functor representing the unary operation */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryView.h b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryView.h
index 271033056..a06d7621e 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryView.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/CwiseUnaryView.h
@@ -64,24 +64,26 @@ class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename in
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
- explicit inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())
+ explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())
: m_matrix(mat), m_functor(func) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)
- EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
- EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
/** \returns the functor representing unary operation */
- const ViewOp& functor() const { return m_functor; }
+ EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; }
/** \returns the nested expression */
- const typename internal::remove_all<MatrixTypeNested>::type&
+ EIGEN_DEVICE_FUNC const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
/** \returns the nested expression */
- typename internal::remove_reference<MatrixTypeNested>::type&
- nestedExpression() { return m_matrix.const_cast_derived(); }
+ EIGEN_DEVICE_FUNC typename internal::remove_reference<MatrixTypeNested>::type&
+ nestedExpression() { return m_matrix; }
protected:
MatrixTypeNested m_matrix;
@@ -108,19 +110,21 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)
-
+
EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); }
EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); }
- EIGEN_DEVICE_FUNC inline Index innerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const
{
return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
}
- EIGEN_DEVICE_FUNC inline Index outerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const
{
return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
}
+ protected:
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)
};
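// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): the
// sizeof scaling in innerStride()/outerStride() above is what lets a view with
// a narrower scalar type step correctly over the underlying storage.
#include <Eigen/Dense>
void view_stride_demo() {
  Eigen::MatrixXcd m(3, 3);  // std::complex<double> storage
  auto re = m.real();        // writable CwiseUnaryView exposing the real parts
  // stride counted in doubles: 1 * sizeof(complex<double>) / sizeof(double) == 2
  eigen_assert(re.innerStride() == 2);
  (void)re;
}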
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/DenseBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/DenseBase.h
index fd933eed4..9b16db68d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/DenseBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/DenseBase.h
@@ -14,15 +14,15 @@
namespace Eigen {
namespace internal {
-
+
// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
// This dummy function simply aims at checking that at compile time.
static inline void check_DenseIndex_is_signed() {
- EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
}
} // end namespace internal
-
+
/** \class DenseBase
* \ingroup Core_Module
*
@@ -40,7 +40,7 @@ static inline void check_DenseIndex_is_signed() {
*/
template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
- : public DenseCoeffsBase<Derived>
+ : public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value>
#else
: public DenseCoeffsBase<Derived,DirectWriteAccessors>
#endif // not EIGEN_PARSED_BY_DOXYGEN
@@ -64,14 +64,14 @@ template<typename Derived> class DenseBase
/** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. */
typedef typename internal::traits<Derived>::Scalar Scalar;
-
+
/** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc.
*
* It is an alias for the Scalar type */
typedef Scalar value_type;
-
+
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef DenseCoeffsBase<Derived> Base;
+ typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base;
using Base::derived;
using Base::const_cast_derived;
@@ -150,13 +150,18 @@ template<typename Derived> class DenseBase
* \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
*/
- IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
- || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+ IsVectorAtCompileTime = internal::traits<Derived>::RowsAtCompileTime == 1
+ || internal::traits<Derived>::ColsAtCompileTime == 1,
/**< This is set to true if either the number of rows or the number of
* columns is known at compile-time to be equal to 1. Indeed, in that case,
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
+ /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
+ * and 2 for matrices.
+ */
+
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
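// Illustrative sketch (assumes <Eigen/Dense> and C++11; not part of the patch):
// the new NumDimensions mirrors the Tensor convention of 0 for scalar-like
// types, 1 for vectors, and 2 for matrices.
#include <Eigen/Dense>
static_assert(Eigen::Matrix3d::NumDimensions == 2, "matrix");
static_assert(Eigen::Vector3d::NumDimensions == 1, "vector");
static_assert(Eigen::Matrix<double, 1, 1>::NumDimensions == 0, "scalar-like");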
@@ -170,11 +175,11 @@ template<typename Derived> class DenseBase
InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
};
-
+
typedef typename internal::find_best_packet<Scalar,SizeAtCompileTime>::type PacketScalar;
enum { IsPlainObjectBase = 0 };
-
+
/** The plain matrix type corresponding to this expression.
* \sa PlainObject */
typedef Matrix<typename internal::traits<Derived>::Scalar,
@@ -184,7 +189,7 @@ template<typename Derived> class DenseBase
internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime
> PlainMatrix;
-
+
/** The plain array type corresponding to this expression.
* \sa PlainObject */
typedef Array<typename internal::traits<Derived>::Scalar,
@@ -206,7 +211,7 @@ template<typename Derived> class DenseBase
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index nonZeros() const { return size(); }
/** \returns the outer size.
@@ -214,7 +219,7 @@ template<typename Derived> class DenseBase
* \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
* with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
* column-major matrix, and the number of rows for a row-major matrix. */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
Index outerSize() const
{
return IsVectorAtCompileTime ? 1
@@ -224,9 +229,9 @@ template<typename Derived> class DenseBase
/** \returns the inner size.
*
* \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
- * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
+ * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
* column-major matrix, and the number of columns for a row-major matrix. */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
Index innerSize() const
{
return IsVectorAtCompileTime ? this->size()
@@ -261,9 +266,9 @@ template<typename Derived> class DenseBase
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;
/** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
- typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType;
+ EIGEN_DEPRECATED typedef CwiseNullaryOp<internal::linspaced_op<Scalar>,PlainObject> SequentialLinSpacedReturnType;
/** \internal Represents a vector with linearly spaced coefficients that allows random access. */
- typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType;
+ typedef CwiseNullaryOp<internal::linspaced_op<Scalar>,PlainObject> RandomAccessLinSpacedReturnType;
/** \internal the return type of MatrixBase::eigenvalues() */
typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
@@ -297,17 +302,17 @@ template<typename Derived> class DenseBase
Derived& operator=(const ReturnByValue<OtherDerived>& func);
/** \internal
- * Copies \a other into *this without evaluating other. \returns a reference to *this.
- * \deprecated */
+ * Copies \a other into *this without evaluating other. \returns a reference to *this. */
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ /** \deprecated */
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC
Derived& lazyAssign(const DenseBase<OtherDerived>& other);
EIGEN_DEVICE_FUNC
CommaInitializer<Derived> operator<< (const Scalar& s);
- /** \deprecated it now returns \c *this */
template<unsigned int Added,unsigned int Removed>
+ /** \deprecated it now returns \c *this */
EIGEN_DEPRECATED
const Derived& flagged() const
{ return derived(); }
@@ -332,12 +337,13 @@ template<typename Derived> class DenseBase
EIGEN_DEVICE_FUNC static const ConstantReturnType
Constant(const Scalar& value);
- EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType
LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType
+ LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
+
EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType
LinSpaced(Index size, const Scalar& low, const Scalar& high);
- EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType
- LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType
LinSpaced(const Scalar& low, const Scalar& high);
@@ -369,7 +375,7 @@ template<typename Derived> class DenseBase
template<typename OtherDerived> EIGEN_DEVICE_FUNC
bool isApprox(const DenseBase<OtherDerived>& other,
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
bool isMuchSmallerThan(const RealScalar& other,
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived> EIGEN_DEVICE_FUNC
@@ -380,7 +386,7 @@ template<typename Derived> class DenseBase
EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
-
+
inline bool hasNaN() const;
inline bool allFinite() const;
@@ -394,8 +400,8 @@ template<typename Derived> class DenseBase
*
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy.
- *
- * \warning Be carefull with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
+ *
+ * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE EvalReturnType eval() const
@@ -405,12 +411,12 @@ template<typename Derived> class DenseBase
// size types on MSVC.
return typename internal::eval<Derived>::type(derived());
}
-
+
/** swaps *this with the expression \a other.
*
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
@@ -422,7 +428,7 @@ template<typename Derived> class DenseBase
*
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(PlainObjectBase<OtherDerived>& other)
{
eigen_assert(rows()==other.rows() && cols()==other.cols());
@@ -443,18 +449,58 @@ template<typename Derived> class DenseBase
EIGEN_DEVICE_FUNC Scalar prod() const;
+ template<int NaNPropagation>
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;
+ template<int NaNPropagation>
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;
- template<typename IndexType> EIGEN_DEVICE_FUNC
+
+ // By default, the fastest version with undefined NaN propagation semantics is
+ // used.
+ // TODO(rmlarsen): Replace with default template argument when we move to
+ // c++11 or beyond.
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff() const {
+ return minCoeff<PropagateFast>();
+ }
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff() const {
+ return maxCoeff<PropagateFast>();
+ }
+
+ template<int NaNPropagation, typename IndexType>
+ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
- template<typename IndexType> EIGEN_DEVICE_FUNC
+ template<int NaNPropagation, typename IndexType>
+ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
- template<typename IndexType> EIGEN_DEVICE_FUNC
+ template<int NaNPropagation, typename IndexType>
+ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
- template<typename IndexType> EIGEN_DEVICE_FUNC
+ template<int NaNPropagation, typename IndexType>
+ EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
+ // TODO(rmlarsen): Replace these methods with a default template argument.
+ template<typename IndexType>
+ EIGEN_DEVICE_FUNC inline
+ typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const {
+ return minCoeff<PropagateFast>(row, col);
+ }
+ template<typename IndexType>
+ EIGEN_DEVICE_FUNC inline
+ typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const {
+ return maxCoeff<PropagateFast>(row, col);
+ }
+ template<typename IndexType>
+ EIGEN_DEVICE_FUNC inline
+ typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const {
+ return minCoeff<PropagateFast>(index);
+ }
+ template<typename IndexType>
+ EIGEN_DEVICE_FUNC inline
+ typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const {
+ return maxCoeff<PropagateFast>(index);
+ }
+
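// Illustrative sketch (assumes <Eigen/Dense> and C++11; not part of the patch):
// the NaNPropagation template parameter added above selects the reduction
// semantics; the untemplated overloads keep the old, fastest behavior
// (PropagateFast, unspecified in the presence of NaN).
#include <Eigen/Dense>
#include <limits>
void nan_propagation_demo() {
  const float nan = std::numeric_limits<float>::quiet_NaN();
  Eigen::Vector3f v(1.f, nan, 3.f);
  float ignoring = v.maxCoeff<Eigen::PropagateNumbers>();  // 3: NaN is skipped
  float poisoned = v.maxCoeff<Eigen::PropagateNaN>();      // NaN: NaN wins
  (void)ignoring; (void)poisoned;
}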
template<typename BinaryOp>
EIGEN_DEVICE_FUNC
Scalar redux(const BinaryOp& func) const;
@@ -493,7 +539,7 @@ template<typename Derived> class DenseBase
typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
- /** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
+ /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions
*
* Example: \include MatrixBase_rowwise.cpp
* Output: \verbinclude MatrixBase_rowwise.out
@@ -506,7 +552,7 @@ template<typename Derived> class DenseBase
}
EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();
- /** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
+ /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions
*
* Example: \include MatrixBase_colwise.cpp
* Output: \verbinclude MatrixBase_colwise.out
@@ -524,16 +570,16 @@ template<typename Derived> class DenseBase
static const RandomReturnType Random();
template<typename ThenDerived,typename ElseDerived>
- const Select<Derived,ThenDerived,ElseDerived>
+ inline EIGEN_DEVICE_FUNC const Select<Derived,ThenDerived,ElseDerived>
select(const DenseBase<ThenDerived>& thenMatrix,
const DenseBase<ElseDerived>& elseMatrix) const;
template<typename ThenDerived>
- inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+ inline EIGEN_DEVICE_FUNC const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const;
template<typename ElseDerived>
- inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+ inline EIGEN_DEVICE_FUNC const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
template<int p> RealScalar lpNorm() const;
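// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): select()
// is the coefficient-wise ternary declared above, here with a scalar else-branch.
#include <Eigen/Dense>
void select_demo() {
  Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(5, -2.f, 2.f);
  Eigen::ArrayXf relu = (x > 0.f).select(x, 0.f);  // keep positives, zero the rest
  (void)relu;
}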
@@ -567,6 +613,44 @@ template<typename Derived> class DenseBase
}
EIGEN_DEVICE_FUNC void reverseInPlace();
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** STL-like <a href="https://en.cppreference.com/w/cpp/named_req/RandomAccessIterator">RandomAccessIterator</a>
+ * iterator type as returned by the begin() and end() methods.
+ */
+ typedef random_access_iterator_type iterator;
+ /** This is the const version of iterator (aka read-only) */
+ typedef random_access_iterator_type const_iterator;
+ #else
+ typedef typename internal::conditional< (Flags&DirectAccessBit)==DirectAccessBit,
+ internal::pointer_based_stl_iterator<Derived>,
+ internal::generic_randaccess_stl_iterator<Derived>
+ >::type iterator_type;
+
+ typedef typename internal::conditional< (Flags&DirectAccessBit)==DirectAccessBit,
+ internal::pointer_based_stl_iterator<const Derived>,
+ internal::generic_randaccess_stl_iterator<const Derived>
+ >::type const_iterator_type;
+
+ // STL-style iterators are supported only for vectors.
+
+ typedef typename internal::conditional< IsVectorAtCompileTime,
+ iterator_type,
+ void
+ >::type iterator;
+
+ typedef typename internal::conditional< IsVectorAtCompileTime,
+ const_iterator_type,
+ void
+ >::type const_iterator;
+ #endif
+
+ inline iterator begin();
+ inline const_iterator begin() const;
+ inline const_iterator cbegin() const;
+ inline iterator end();
+ inline const_iterator end() const;
+ inline const_iterator cend() const;
+
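// Illustrative sketch (assumes <Eigen/Dense> and C++11; not part of the patch):
// begin()/end() exist only for vectors; the conditional typedefs above make
// 'iterator' void for matrices, so matrix code fails to compile rather than
// iterating in an unspecified order.
#include <Eigen/Dense>
#include <numeric>
void iterator_demo() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(4, 1.0, 4.0);
  double sum = std::accumulate(v.begin(), v.end(), 0.0);  // 10
  for (double& x : v) x *= 2.0;                           // iterators are writable
  (void)sum;
}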
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
@@ -574,6 +658,7 @@ template<typename Derived> class DenseBase
# include "../plugins/CommonCwiseUnaryOps.h"
# include "../plugins/BlockMethods.h"
# include "../plugins/IndexedViewMethods.h"
+# include "../plugins/ReshapedMethods.h"
# ifdef EIGEN_DENSEBASE_PLUGIN
# include EIGEN_DENSEBASE_PLUGIN
# endif
@@ -591,11 +676,12 @@ template<typename Derived> class DenseBase
}
protected:
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase)
/** Default constructor. Do nothing. */
EIGEN_DEVICE_FUNC DenseBase()
{
/* Just checks for self-consistency of the flags.
- * Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down
+ * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
*/
#ifdef EIGEN_INTERNAL_DEBUGGING
EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/DenseCoeffsBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/DenseCoeffsBase.h
index c4af48ab6..37fcdb591 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/DenseCoeffsBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/DenseCoeffsBase.h
@@ -22,11 +22,12 @@ template<typename T> struct add_const_on_value_type_if_arithmetic
/** \brief Base class providing read-only coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
- * \tparam #ReadOnlyAccessors Constant indicating read-only access
+ *
+ * \note #ReadOnlyAccessors Constant indicating read-only access
*
* This class defines the \c operator() \c const function and friends, which can be used to read specific
* entries of a matrix or array.
- *
+ *
* \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
* \ref TopicClassHierarchy
*/
@@ -288,12 +289,13 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
/** \brief Base class providing read/write coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
- * \tparam #WriteAccessors Constant indicating read/write access
+ *
+ * \note #WriteAccessors Constant indicating read/write access
*
* This class defines the non-const \c operator() function and friends, which can be used to write specific
* entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
* defines the const variant for reading specific entries.
- *
+ *
* \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
*/
template<typename Derived>
@@ -466,7 +468,8 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
- * \tparam #DirectAccessors Constant indicating direct access
+ *
+ * \note #DirectAccessors Constant indicating direct access
*
* This class defines functions to work with strides which can be used to access entries directly. This class
* inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
@@ -492,7 +495,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
*
* \sa outerStride(), rowStride(), colStride()
*/
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index innerStride() const
{
return derived().innerStride();
@@ -503,14 +506,14 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
*
* \sa innerStride(), rowStride(), colStride()
*/
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index outerStride() const
{
return derived().outerStride();
}
// FIXME shall we remove it ?
- inline Index stride() const
+ EIGEN_CONSTEXPR inline Index stride() const
{
return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
}
@@ -519,7 +522,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
*
* \sa innerStride(), outerStride(), colStride()
*/
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index rowStride() const
{
return Derived::IsRowMajor ? outerStride() : innerStride();
@@ -529,7 +532,7 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
*
* \sa innerStride(), outerStride(), rowStride()
*/
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index colStride() const
{
return Derived::IsRowMajor ? innerStride() : outerStride();
@@ -539,7 +542,8 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
* \ingroup Core_Module
* \tparam Derived Type of the derived class
- * \tparam #DirectWriteAccessors Constant indicating direct access
+ *
+ * \note #DirectWriteAccessors Constant indicating direct access
*
* This class defines functions to work with strides which can be used to access entries directly. This class
* inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
@@ -566,8 +570,8 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
*
* \sa outerStride(), rowStride(), colStride()
*/
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT
{
return derived().innerStride();
}
@@ -577,14 +581,14 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
*
* \sa innerStride(), rowStride(), colStride()
*/
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT
{
return derived().outerStride();
}
// FIXME shall we remove it ?
- inline Index stride() const
+ EIGEN_CONSTEXPR inline Index stride() const EIGEN_NOEXCEPT
{
return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
}
@@ -593,8 +597,8 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
*
* \sa innerStride(), outerStride(), colStride()
*/
- EIGEN_DEVICE_FUNC
- inline Index rowStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rowStride() const EIGEN_NOEXCEPT
{
return Derived::IsRowMajor ? outerStride() : innerStride();
}
@@ -603,8 +607,8 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
*
* \sa innerStride(), outerStride(), rowStride()
*/
- EIGEN_DEVICE_FUNC
- inline Index colStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index colStride() const EIGEN_NOEXCEPT
{
return Derived::IsRowMajor ? innerStride() : outerStride();
}
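// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch):
// rowStride()/colStride() above resolve to the inner or outer stride depending
// on the storage order.
#include <Eigen/Dense>
void stride_demo() {
  Eigen::MatrixXd m(5, 7);            // column-major by default
  eigen_assert(m.innerStride() == 1); // consecutive within a column
  eigen_assert(m.outerStride() == 5); // == rows() between columns
  eigen_assert(m.rowStride() == 1 && m.colStride() == 5);
  (void)m;
}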
@@ -615,7 +619,7 @@ namespace internal {
template<int Alignment, typename Derived, bool JustReturnZero>
struct first_aligned_impl
{
- static inline Index run(const Derived&)
+ static EIGEN_CONSTEXPR inline Index run(const Derived&) EIGEN_NOEXCEPT
{ return 0; }
};
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/DenseStorage.h b/examples/ThirdPartyLibs/Eigen/src/Core/DenseStorage.h
index 7958feeb9..08ef6c530 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/DenseStorage.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/DenseStorage.h
@@ -47,21 +47,21 @@ struct plain_array
EIGEN_DEVICE_FUNC
plain_array()
- {
+ {
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
plain_array(constructor_without_unaligned_array_assert)
- {
+ {
check_static_allocation_size<T,Size>();
}
};
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
-#elif EIGEN_GNUC_AT_LEAST(4,7)
- // GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned.
+#elif EIGEN_GNUC_AT_LEAST(4,7)
+ // GCC 4.7 is too aggressive in its optimizations and removes the alignment test based on the fact that the array is declared to be aligned.
// See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
// Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
template<typename PtrType>
@@ -85,15 +85,15 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 8>
EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size];
EIGEN_DEVICE_FUNC
- plain_array()
+ plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
- plain_array(constructor_without_unaligned_array_assert)
- {
+ plain_array(constructor_without_unaligned_array_assert)
+ {
check_static_allocation_size<T,Size>();
}
};
@@ -104,15 +104,15 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 16>
EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size];
EIGEN_DEVICE_FUNC
- plain_array()
- {
+ plain_array()
+ {
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
- plain_array(constructor_without_unaligned_array_assert)
- {
+ plain_array(constructor_without_unaligned_array_assert)
+ {
check_static_allocation_size<T,Size>();
}
};
@@ -123,15 +123,15 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 32>
EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size];
EIGEN_DEVICE_FUNC
- plain_array()
+ plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
- plain_array(constructor_without_unaligned_array_assert)
- {
+ plain_array(constructor_without_unaligned_array_assert)
+ {
check_static_allocation_size<T,Size>();
}
};
@@ -142,15 +142,15 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 64>
EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size];
EIGEN_DEVICE_FUNC
- plain_array()
- {
+ plain_array()
+ {
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63);
check_static_allocation_size<T,Size>();
}
EIGEN_DEVICE_FUNC
- plain_array(constructor_without_unaligned_array_assert)
- {
+ plain_array(constructor_without_unaligned_array_assert)
+ {
check_static_allocation_size<T,Size>();
}
};
@@ -163,6 +163,30 @@ struct plain_array<T, 0, MatrixOrArrayOptions, Alignment>
EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) {}
};
+struct plain_array_helper {
+ template<typename T, int Size, int MatrixOrArrayOptions, int Alignment>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ static void copy(const plain_array<T, Size, MatrixOrArrayOptions, Alignment>& src, const Eigen::Index size,
+ plain_array<T, Size, MatrixOrArrayOptions, Alignment>& dst) {
+ smart_copy(src.array, src.array + size, dst.array);
+ }
+
+ template<typename T, int Size, int MatrixOrArrayOptions, int Alignment>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ static void swap(plain_array<T, Size, MatrixOrArrayOptions, Alignment>& a, const Eigen::Index a_size,
+ plain_array<T, Size, MatrixOrArrayOptions, Alignment>& b, const Eigen::Index b_size) {
+ if (a_size < b_size) {
+ std::swap_ranges(b.array, b.array + a_size, a.array);
+ smart_move(b.array + a_size, b.array + b_size, a.array + a_size);
+ } else if (a_size > b_size) {
+ std::swap_ranges(a.array, a.array + b_size, b.array);
+ smart_move(a.array + b_size, a.array + a_size, b.array + b_size);
+ } else {
+ std::swap_ranges(a.array, a.array + a_size, b.array);
+ }
+ }
+};
+
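// Illustrative sketch (assumes <Eigen/Dense>; not part of the patch): this
// helper backs swap() for dynamic-size objects with a fixed maximum size, where
// only a prefix of each plain_array is live, so the two sizes may differ.
#include <Eigen/Dense>
void bounded_swap_demo() {
  // dynamic length with a compile-time capacity of 4 (MaxRowsAtCompileTime)
  Eigen::Matrix<float, Eigen::Dynamic, 1, 0, 4, 1> a(2), b(4);
  a << 1, 2;
  b << 3, 4, 5, 6;
  a.swap(b);  // a is now {3,4,5,6} (size 4), b is {1,2} (size 2)
}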
} // end namespace internal
/** \internal
@@ -190,16 +214,41 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
EIGEN_DEVICE_FUNC
explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()) {}
- EIGEN_DEVICE_FUNC
+#if !EIGEN_HAS_CXX11 || defined(EIGEN_DENSE_STORAGE_CTOR_PLUGIN)
+ EIGEN_DEVICE_FUNC
DenseStorage(const DenseStorage& other) : m_data(other.m_data) {
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
}
- EIGEN_DEVICE_FUNC
+#else
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) = default;
+#endif
+#if !EIGEN_HAS_CXX11
+ EIGEN_DEVICE_FUNC
DenseStorage& operator=(const DenseStorage& other)
- {
+ {
if (this != &other) m_data = other.m_data;
- return *this;
+ return *this;
+ }
+#else
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) = default;
+#endif
+#if EIGEN_HAS_RVALUE_REFERENCES
+#if !EIGEN_HAS_CXX11
+ EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT
+ : m_data(std::move(other.m_data))
+ {
}
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
+ {
+ if (this != &other)
+ m_data = std::move(other.m_data);
+ return *this;
+ }
+#else
+ EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&&) = default;
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&&) = default;
+#endif
+#endif
EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) {
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
eigen_internal_assert(size==rows*cols && rows==_Rows && cols==_Cols);
@@ -207,9 +256,11 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
EIGEN_UNUSED_VARIABLE(rows);
EIGEN_UNUSED_VARIABLE(cols);
}
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
- EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
- EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data, other.m_data);
+ }
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return _Rows;}
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
@@ -226,8 +277,8 @@ template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; }
EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {}
- EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
- EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return _Rows;}
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC const T *data() const { return 0; }
@@ -254,20 +305,28 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
- EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
- EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
- {
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows), m_cols(other.m_cols)
+ {
+ internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data);
+ }
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
+ {
if (this != &other)
{
- m_data = other.m_data;
m_rows = other.m_rows;
m_cols = other.m_cols;
+ internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data);
}
- return *this;
+ return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
- { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+ {
+ internal::plain_array_helper::swap(m_data, m_rows * m_cols, other.m_data, other.m_rows * other.m_cols);
+ numext::swap(m_rows,other.m_rows);
+ numext::swap(m_cols,other.m_cols);
+ }
EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols() const {return m_cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }
@@ -285,20 +344,29 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {}
EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
- EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
- EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows)
+ {
+ internal::plain_array_helper::copy(other.m_data, m_rows * _Cols, m_data);
+ }
+
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
- m_data = other.m_data;
m_rows = other.m_rows;
+ internal::plain_array_helper::copy(other.m_data, m_rows * _Cols, m_data);
}
- return *this;
+ return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {}
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
- EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;}
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
+ {
+ internal::plain_array_helper::swap(m_data, m_rows * _Cols, other.m_data, other.m_rows * _Cols);
+ numext::swap(m_rows, other.m_rows);
+ }
+ EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;}
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols(void) const EIGEN_NOEXCEPT {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; }
EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) { m_rows = rows; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
@@ -314,22 +382,29 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {}
EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
- EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(other.m_cols)
+ {
+ internal::plain_array_helper::copy(other.m_data, _Rows * m_cols, m_data);
+ }
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
- m_data = other.m_data;
m_cols = other.m_cols;
+ internal::plain_array_helper::copy(other.m_data, _Rows * m_cols, m_data);
}
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {}
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;}
- EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
- void conservativeResize(Index, Index, Index cols) { m_cols = cols; }
- void resize(Index, Index, Index cols) { m_cols = cols; }
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ internal::plain_array_helper::swap(m_data, _Rows * m_cols, other.m_data, _Rows * other.m_cols);
+ numext::swap(m_cols, other.m_cols);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows(void) const EIGEN_NOEXCEPT {return _Rows;}
+ EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;}
+ EIGEN_DEVICE_FUNC void conservativeResize(Index, Index, Index cols) { m_cols = cols; }
+ EIGEN_DEVICE_FUNC void resize(Index, Index, Index cols) { m_cols = cols; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
};
@@ -381,18 +456,21 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_data, other.m_data);
- swap(m_rows, other.m_rows);
- swap(m_cols, other.m_cols);
+ numext::swap(m_data, other.m_data);
+ numext::swap(m_rows, other.m_rows);
+ numext::swap(m_cols, other.m_cols);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
EIGEN_DEVICE_FUNC void swap(DenseStorage& other)
- { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
- EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
+ {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_rows,other.m_rows);
+ numext::swap(m_cols,other.m_cols);
+ }
+ EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;}
+ EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;}
void conservativeResize(Index size, Index rows, Index cols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
@@ -404,7 +482,7 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
if(size != m_rows*m_cols)
{
internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);
- if (size)
+      if (size>0) // >0 and not simply !=0 to let the compiler know that size cannot be negative
m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
else
m_data = 0;
@@ -446,7 +524,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
this->swap(tmp);
}
return *this;
- }
+ }
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC
DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT
@@ -459,16 +537,18 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_data, other.m_data);
- swap(m_cols, other.m_cols);
+ numext::swap(m_data, other.m_data);
+ numext::swap(m_cols, other.m_cols);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
- EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_cols,other.m_cols);
+ }
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return _Rows;}
+ EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
@@ -479,7 +559,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
if(size != _Rows*m_cols)
{
internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);
- if (size)
+      if (size>0) // >0 and not simply !=0 to let the compiler know that size cannot be negative
m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
else
m_data = 0;
@@ -520,7 +600,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
this->swap(tmp);
}
return *this;
- }
+ }
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC
DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT
@@ -533,16 +613,18 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
EIGEN_DEVICE_FUNC
DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_data, other.m_data);
- swap(m_rows, other.m_rows);
+ numext::swap(m_data, other.m_data);
+ numext::swap(m_rows, other.m_rows);
return *this;
}
#endif
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
- EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
- EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
+ numext::swap(m_data,other.m_data);
+ numext::swap(m_rows,other.m_rows);
+ }
+ EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;}
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) {return _Cols;}
void conservativeResize(Index size, Index rows, Index)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
@@ -553,7 +635,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
if(size != m_rows*_Cols)
{
internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);
- if (size)
+      if (size>0) // >0 and not simply !=0 to let the compiler know that size cannot be negative
m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
else
m_data = 0;
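The fixed-size DenseStorage specializations above now copy and swap only the elements actually in use (m_rows*m_cols, m_rows*_Cols, or _Rows*m_cols) via plain_array_helper, instead of touching the whole fixed-capacity array. A minimal standalone sketch of that idea; FixedBuffer and copy_used are illustrative names, not Eigen internals:

#include <algorithm>
#include <cstddef>

// Hypothetical stand-in for a fixed-capacity storage buffer.
template <typename T, std::size_t Capacity>
struct FixedBuffer {
  T array[Capacity];
};

// Copy only the `used` leading elements, as plain_array_helper::copy
// now does with rows*cols, rather than blindly copying the capacity.
template <typename T, std::size_t Capacity>
void copy_used(const FixedBuffer<T, Capacity>& src, std::size_t used,
               FixedBuffer<T, Capacity>& dst) {
  std::copy(src.array, src.array + used, dst.array);
}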
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Diagonal.h b/examples/ThirdPartyLibs/Eigen/src/Core/Diagonal.h
index c62f5ff21..3112d2c16 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Diagonal.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Diagonal.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_DIAGONAL_H
#define EIGEN_DIAGONAL_H
-namespace Eigen {
+namespace Eigen {
/** \class Diagonal
* \ingroup Core_Module
@@ -70,7 +70,10 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
EIGEN_DEVICE_FUNC
- explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
+ explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index)
+ {
+ eigen_assert( a_index <= m_matrix.cols() && -a_index <= m_matrix.rows() );
+ }
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
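A small usage sketch of the new bounds check, assuming a debug build where eigen_assert is active:

#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Random();
  Eigen::Vector2d d = m.diagonal(1);  // valid: 1 <= cols() and -1 <= rows()
  // m.diagonal(4);                   // would now trip the eigen_assert above
  return d.size() == 2 ? 0 : 1;
}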
@@ -81,20 +84,16 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
: numext::mini<Index>(m_matrix.rows(),m_matrix.cols()-m_index.value());
}
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return 1; }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return 1; }
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const
- {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT {
return m_matrix.outerStride() + 1;
}
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const
- {
- return 0;
- }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return 0; }
typedef typename internal::conditional<
internal::is_lvalue<MatrixType>::value,
@@ -146,8 +145,8 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
}
EIGEN_DEVICE_FUNC
- inline const typename internal::remove_all<typename MatrixType::Nested>::type&
- nestedExpression() const
+ inline const typename internal::remove_all<typename MatrixType::Nested>::type&
+ nestedExpression() const
{
return m_matrix;
}
@@ -164,12 +163,12 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
private:
// some compilers may fail to optimize std::max etc in case of compile-time constants...
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index absDiagIndex() const EIGEN_NOEXCEPT { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rowOffset() const EIGEN_NOEXCEPT { return m_index.value()>0 ? 0 : -m_index.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index colOffset() const EIGEN_NOEXCEPT { return m_index.value()>0 ? m_index.value() : 0; }
// trigger a compile-time error if someone try to call packet
template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/DiagonalMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/DiagonalMatrix.h
index 4e8297ee6..542685c65 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/DiagonalMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/DiagonalMatrix.h
@@ -83,6 +83,30 @@ class DiagonalBase : public EigenBase<Derived>
{
return DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,DiagonalVectorType,product) >(scalar * other.diagonal());
}
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ inline unspecified_expression_type
+ #else
+ inline const DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(DiagonalVectorType,typename OtherDerived::DiagonalVectorType,sum) >
+ #endif
+ operator+(const DiagonalBase<OtherDerived>& other) const
+ {
+ return (diagonal() + other.diagonal()).asDiagonal();
+ }
+
+ template<typename OtherDerived>
+ EIGEN_DEVICE_FUNC
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ inline unspecified_expression_type
+ #else
+ inline const DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(DiagonalVectorType,typename OtherDerived::DiagonalVectorType,difference) >
+ #endif
+ operator-(const DiagonalBase<OtherDerived>& other) const
+ {
+ return (diagonal() - other.diagonal()).asDiagonal();
+ }
};
#endif
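A usage sketch of the new diagonal sum and difference operators; the result stays a diagonal expression until assigned to a dense matrix:

#include <Eigen/Dense>

int main() {
  Eigen::DiagonalMatrix<double, 3> d1(1.0, 2.0, 3.0), d2(4.0, 5.0, 6.0);
  Eigen::Matrix3d sum  = d1 + d2;  // dense result with (5, 7, 9) on the diagonal
  Eigen::Matrix3d diff = d2 - d1;  // dense result with (3, 3, 3) on the diagonal
  return (sum(1, 1) == 7.0 && diff(2, 2) == 3.0) ? 0 : 1;
}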
@@ -154,6 +178,30 @@ class DiagonalMatrix
EIGEN_DEVICE_FUNC
inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
+ #if EIGEN_HAS_CXX11
+ /** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients. \cpp11
+ *
+   * There exist analogous C++98 constructors for fixed-size diagonal matrices having 2 or 3 coefficients.
+ *
+ * \warning To construct a diagonal matrix of fixed size, the number of values passed to this
+ * constructor must match the fixed dimension of \c *this.
+ *
+ * \sa DiagonalMatrix(const Scalar&, const Scalar&)
+ * \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&)
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const ArgTypes&... args)
+ : m_diagonal(a0, a1, a2, args...) {}
+
+  /** \brief Constructs a DiagonalMatrix and initializes it with the elements given in an initializer list of initializer
+ * lists \cpp11
+ */
+ EIGEN_DEVICE_FUNC
+ explicit EIGEN_STRONG_INLINE DiagonalMatrix(const std::initializer_list<std::initializer_list<Scalar>>& list)
+ : m_diagonal(list) {}
+ #endif // EIGEN_HAS_CXX11
+
/** Copy constructor. */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
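A usage sketch of the C++11 variadic constructor (assuming EIGEN_HAS_CXX11), which lifts the old limit of 2 or 3 coefficients:

#include <Eigen/Dense>

int main() {
  Eigen::DiagonalMatrix<double, 4> d(1.0, 2.0, 3.0, 4.0);
  Eigen::Matrix4d m = d;  // dense 4x4 with (1, 2, 3, 4) on the diagonal
  return m(3, 3) == 4.0 ? 0 : 1;
}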
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Dot.h b/examples/ThirdPartyLibs/Eigen/src/Core/Dot.h
index bb8e3fecc..5c3441b92 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Dot.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Dot.h
@@ -31,7 +31,8 @@ struct dot_nocheck
typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
typedef typename conj_prod::result_type ResScalar;
EIGEN_DEVICE_FUNC
- static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+ EIGEN_STRONG_INLINE
+ static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
{
return a.template binaryExpr<conj_prod>(b).sum();
}
@@ -43,7 +44,8 @@ struct dot_nocheck<T, U, true>
typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;
typedef typename conj_prod::result_type ResScalar;
EIGEN_DEVICE_FUNC
- static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+ EIGEN_STRONG_INLINE
+ static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
{
return a.transpose().template binaryExpr<conj_prod>(b).sum();
}
@@ -65,6 +67,7 @@ struct dot_nocheck<T, U, true>
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE
typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
@@ -83,7 +86,7 @@ MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
//---------- implementation of L2 norm and related functions ----------
-/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm.
  * In both cases, it consists of the sum of the squares of all the matrix entries.
  * For vectors, this is also equal to the dot product of \c *this with itself.
*
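A worked example of the corrected documentation, showing that squaredNorm() is the squared Frobenius norm for matrices:

#include <Eigen/Dense>

int main() {
  Eigen::Matrix2d a;
  a << 1, 2,
       3, 4;
  double s = a.squaredNorm();  // 1 + 4 + 9 + 16 = 30, the squared Frobenius norm
  double n = a.norm();         // sqrt(30), the Frobenius norm itself
  return (s == 30.0 && n * n > 29.9) ? 0 : 1;
}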
@@ -102,7 +105,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::trai
* \sa lpNorm(), dot(), squaredNorm()
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
{
return numext::sqrt(squaredNorm());
}
@@ -117,7 +120,7 @@ EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::
* \sa norm(), normalize()
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::PlainObject
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::normalized() const
{
typedef typename internal::nested_eval<Derived,2>::type _Nested;
@@ -139,7 +142,7 @@ MatrixBase<Derived>::normalized() const
* \sa norm(), normalized()
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::normalize()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
{
RealScalar z = squaredNorm();
// NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
@@ -160,7 +163,7 @@ EIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::normalize()
* \sa stableNorm(), stableNormalize(), normalized()
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::PlainObject
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::stableNormalized() const
{
typedef typename internal::nested_eval<Derived,3>::type _Nested;
@@ -185,7 +188,7 @@ MatrixBase<Derived>::stableNormalized() const
* \sa stableNorm(), stableNormalized(), normalize()
*/
template<typename Derived>
-EIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::stableNormalize()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize()
{
RealScalar w = cwiseAbs().maxCoeff();
RealScalar z = (derived()/w).squaredNorm();
@@ -204,7 +207,7 @@ struct lpNorm_selector
EIGEN_DEVICE_FUNC
static inline RealScalar run(const MatrixBase<Derived>& m)
{
- EIGEN_USING_STD_MATH(pow)
+ EIGEN_USING_STD(pow)
return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
}
};
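The lpNorm_selector above computes (sum_i |a_i|^p)^(1/p); a small usage sketch of the public API it backs:

#include <Eigen/Dense>

int main() {
  Eigen::Vector3d v(1.0, -2.0, 2.0);
  double l1 = v.lpNorm<1>();                // |1| + |-2| + |2| = 5
  double l3 = v.lpNorm<3>();                // (1 + 8 + 8)^(1/3) = 17^(1/3)
  double li = v.lpNorm<Eigen::Infinity>();  // max |coeff| = 2
  return (l1 == 5.0 && li == 2.0 && l3 > 2.5) ? 0 : 1;
}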
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/EigenBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/EigenBase.h
index b195506a9..6b3c7d374 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/EigenBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/EigenBase.h
@@ -15,7 +15,7 @@ namespace Eigen {
/** \class EigenBase
* \ingroup Core_Module
- *
+ *
* Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
*
* In other words, an EigenBase object is an object that can be copied into a MatrixBase.
@@ -29,11 +29,12 @@ namespace Eigen {
template<typename Derived> struct EigenBase
{
// typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
-
+
/** \brief The interface type of indices
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
- * \deprecated Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead.
* \sa StorageIndex, \ref TopicPreprocessorDirectives.
+ * DEPRECATED: Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead.
+ * Deprecation is not marked with a doxygen comment because there are too many existing usages to add the deprecation attribute.
*/
typedef Eigen::Index Index;
@@ -55,15 +56,15 @@ template<typename Derived> struct EigenBase
{ return *static_cast<const Derived*>(this); }
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */
- EIGEN_DEVICE_FUNC
- inline Index rows() const { return derived().rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return derived().rows(); }
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return derived().cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return derived().cols(); }
/** \returns the number of coefficients, which is rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */
- EIGEN_DEVICE_FUNC
- inline Index size() const { return rows() * cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index size() const EIGEN_NOEXCEPT { return rows() * cols(); }
/** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
template<typename Dest>
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ForceAlignedAccess.h b/examples/ThirdPartyLibs/Eigen/src/Core/ForceAlignedAccess.h
index 7b08b45e6..817a43afc 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ForceAlignedAccess.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ForceAlignedAccess.h
@@ -41,10 +41,14 @@ template<typename ExpressionType> class ForceAlignedAccess
EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); }
EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/GeneralProduct.h b/examples/ThirdPartyLibs/Eigen/src/Core/GeneralProduct.h
index 694f7cbde..6906aa75d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/GeneralProduct.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/GeneralProduct.h
@@ -35,7 +35,7 @@ template<int Rows, int Cols, int Depth> struct product_type_selector;
template<int Size, int MaxSize> struct product_size_category
{
enum {
- #ifndef EIGEN_CUDA_ARCH
+ #ifndef EIGEN_GPU_COMPILE_PHASE
is_large = MaxSize == Dynamic ||
Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||
(Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),
@@ -163,13 +163,13 @@ template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vect
template<typename Scalar,int Size,int MaxSize>
struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
{
- EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
};
template<typename Scalar,int Size>
struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
{
- EIGEN_STRONG_INLINE Scalar* data() { return 0; }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { return 0; }
};
template<typename Scalar,int Size,int MaxSize>
@@ -228,8 +228,7 @@ template<> struct gemv_dense_selector<OnTheRight,ColMajor,true>
ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);
- ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
- * RhsBlasTraits::extractScalarFactor(rhs);
+ ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
// make sure Dest is a compile-time vector type (bug 1166)
typedef typename conditional<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr>::type ActualDest;
@@ -239,7 +238,7 @@ template<> struct gemv_dense_selector<OnTheRight,ColMajor,true>
// on, the other hand it is good for the cache to pack the vector anyways...
EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
- MightCannotUseDest = (!EvalToDestAtCompileTime) || ComplexByReal
+ MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime!=0)
};
typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
@@ -320,13 +319,12 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,true>
typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
- ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
- * RhsBlasTraits::extractScalarFactor(rhs);
+ ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
// on, the other hand it is good for the cache to pack the vector anyways...
- DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1
+ DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime==0
};
gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
@@ -396,8 +394,8 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,false>
*/
template<typename Derived>
template<typename OtherDerived>
-EIGEN_DEVICE_FUNC
-inline const Product<Derived, OtherDerived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const Product<Derived, OtherDerived>
MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
{
// A note regarding the function declaration: In MSVC, this function will sometimes
@@ -439,8 +437,9 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
*/
template<typename Derived>
template<typename OtherDerived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Product<Derived,OtherDerived,LazyProduct>
-EIGEN_DEVICE_FUNC MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
{
enum {
ProductIsValid = Derived::ColsAtCompileTime==Dynamic
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/GenericPacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/GenericPacketMath.h
index 30878eda6..cf677a190 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/GenericPacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/GenericPacketMath.h
@@ -44,18 +44,23 @@ struct default_packet_traits
enum {
HasHalfPacket = 0,
- HasAdd = 1,
- HasSub = 1,
- HasMul = 1,
- HasNegate = 1,
- HasAbs = 1,
- HasArg = 0,
- HasAbs2 = 1,
- HasMin = 1,
- HasMax = 1,
- HasConj = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 0,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
HasSetLinear = 1,
- HasBlend = 0,
+ HasBlend = 0,
+ // This flag is used to indicate whether packet comparison is supported.
+ // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true.
+ HasCmp = 0,
HasDiv = 0,
HasSqrt = 0,
@@ -82,14 +87,18 @@ struct default_packet_traits
HasPolygamma = 0,
HasErf = 0,
HasErfc = 0,
+ HasNdtri = 0,
+ HasBessel = 0,
HasIGamma = 0,
+ HasIGammaDerA = 0,
+ HasGammaSampleDerAlpha = 0,
HasIGammac = 0,
HasBetaInc = 0,
HasRound = 0,
+ HasRint = 0,
HasFloor = 0,
HasCeil = 0,
-
HasSign = 0
};
};
@@ -120,6 +129,22 @@ template<typename T> struct packet_traits : default_packet_traits
template<typename T> struct packet_traits<const T> : packet_traits<T> { };
+template<typename T> struct unpacket_traits
+{
+ typedef T type;
+ typedef T half;
+ enum
+ {
+ size = 1,
+ alignment = 1,
+ vectorizable = false,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
+
+template<typename T> struct unpacket_traits<const T> : unpacket_traits<T> { };
+
template <typename Src, typename Tgt> struct type_casting_traits {
enum {
VectorizedCast = 0,
@@ -128,6 +153,34 @@ template <typename Src, typename Tgt> struct type_casting_traits {
};
};
+/** \internal Wrapper to ensure that multiple packet types can map to the same
+  underlying vector type. */
+template<typename T, int unique_id = 0>
+struct eigen_packet_wrapper
+{
+ EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
+ EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
+ EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
+ EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
+ EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
+ m_val = v;
+ return *this;
+ }
+
+ T m_val;
+};
+
+
+/** \internal A convenience utility for determining if the type is a scalar.
+ * This is used to enable some generic packet implementations.
+ */
+template<typename Packet>
+struct is_scalar {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ enum {
+ value = internal::is_same<Packet, Scalar>::value
+ };
+};
/** \internal \returns static_cast<TgtType>(a) (coeff-wise) */
template <typename SrcPacket, typename TgtPacket>
@@ -140,75 +193,406 @@ EIGEN_DEVICE_FUNC inline TgtPacket
pcast(const SrcPacket& a, const SrcPacket& /*b*/) {
return static_cast<TgtPacket>(a);
}
-
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket
pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/) {
return static_cast<TgtPacket>(a);
}
+template <typename SrcPacket, typename TgtPacket>
+EIGEN_DEVICE_FUNC inline TgtPacket
+pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/,
+ const SrcPacket& /*e*/, const SrcPacket& /*f*/, const SrcPacket& /*g*/, const SrcPacket& /*h*/) {
+ return static_cast<TgtPacket>(a);
+}
+
+/** \internal \returns reinterpret_cast<Target>(a) */
+template <typename Target, typename Packet>
+EIGEN_DEVICE_FUNC inline Target
+preinterpret(const Packet& a); /* { return reinterpret_cast<const Target&>(a); } */
/** \internal \returns a + b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-padd(const Packet& a,
- const Packet& b) { return a+b; }
+padd(const Packet& a, const Packet& b) { return a+b; }
+// Avoid compiler warning for boolean algebra.
+template<> EIGEN_DEVICE_FUNC inline bool
+padd(const bool& a, const bool& b) { return a || b; }
/** \internal \returns a - b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-psub(const Packet& a,
- const Packet& b) { return a-b; }
+psub(const Packet& a, const Packet& b) { return a-b; }
/** \internal \returns -a (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pnegate(const Packet& a) { return -a; }
-/** \internal \returns conj(a) (coeff-wise) */
+template<> EIGEN_DEVICE_FUNC inline bool
+pnegate(const bool& a) { return !a; }
+/** \internal \returns conj(a) (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pconj(const Packet& a) { return numext::conj(a); }
/** \internal \returns a * b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pmul(const Packet& a,
- const Packet& b) { return a*b; }
+pmul(const Packet& a, const Packet& b) { return a*b; }
+// Avoid compiler warning for boolean algebra.
+template<> EIGEN_DEVICE_FUNC inline bool
+pmul(const bool& a, const bool& b) { return a && b; }
/** \internal \returns a / b (coeff-wise) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pdiv(const Packet& a,
- const Packet& b) { return a/b; }
+pdiv(const Packet& a, const Packet& b) { return a/b; }
+
+// In the generic case, memset to all one bits.
+template<typename Packet, typename EnableIf = void>
+struct ptrue_impl {
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/){
+ Packet b;
+ memset(static_cast<void*>(&b), 0xff, sizeof(Packet));
+ return b;
+ }
+};
-/** \internal \returns the min of \a a and \a b (coeff-wise) */
+// For non-trivial scalars, set to Scalar(1) (i.e. a non-zero value).
+// Although this is technically not a valid bitmask, the scalar path for pselect
+// uses a comparison to zero, so this should still work in most cases. We don't
+// have another option, since the scalar type requires initialization.
+template<typename T>
+struct ptrue_impl<T,
+ typename internal::enable_if<is_scalar<T>::value && NumTraits<T>::RequireInitialization>::type > {
+ static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/){
+ return T(1);
+ }
+};
+
+/** \internal \returns a packet whose bits are all ones. */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pmin(const Packet& a,
- const Packet& b) { return numext::mini(a, b); }
+ptrue(const Packet& a) {
+ return ptrue_impl<Packet>::run(a);
+}
+
+// In the general case, memset to zero.
+template<typename Packet, typename EnableIf = void>
+struct pzero_impl {
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) {
+ Packet b;
+ memset(static_cast<void*>(&b), 0x00, sizeof(Packet));
+ return b;
+ }
+};
+
+// For scalars, explicitly set to Scalar(0), since the underlying representation
+// for zero may not consist of all-zero bits.
+template<typename T>
+struct pzero_impl<T,
+ typename internal::enable_if<is_scalar<T>::value>::type> {
+ static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) {
+ return T(0);
+ }
+};
-/** \internal \returns the max of \a a and \a b (coeff-wise) */
+/** \internal \returns packet of zeros */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pmax(const Packet& a,
- const Packet& b) { return numext::maxi(a, b); }
+pzero(const Packet& a) {
+ return pzero_impl<Packet>::run(a);
+}
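A standalone sketch of the two generic fallbacks above; all_ones_bits and all_zero_bits are illustrative names, not Eigen's:

#include <cstring>

template <typename T>
T all_ones_bits() { T b; std::memset(static_cast<void*>(&b), 0xff, sizeof(T)); return b; }

template <typename T>
T all_zero_bits() { T b; std::memset(static_cast<void*>(&b), 0x00, sizeof(T)); return b; }

// For T = double, all_ones_bits() is a NaN bit pattern, which is why the
// scalar specializations return T(1) and T(0) instead of raw bit patterns.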
-/** \internal \returns the absolute value of \a a */
+/** \internal \returns a <= b as a bit mask */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pabs(const Packet& a) { using std::abs; return abs(a); }
+pcmp_le(const Packet& a, const Packet& b) { return a<=b ? ptrue(a) : pzero(a); }
-/** \internal \returns the phase angle of \a a */
+/** \internal \returns a < b as a bit mask */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-parg(const Packet& a) { using numext::arg; return arg(a); }
+pcmp_lt(const Packet& a, const Packet& b) { return a<b ? ptrue(a) : pzero(a); }
+
+/** \internal \returns a == b as a bit mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pcmp_eq(const Packet& a, const Packet& b) { return a==b ? ptrue(a) : pzero(a); }
+
+/** \internal \returns a < b or a==NaN or b==NaN as a bit mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pcmp_lt_or_nan(const Packet& a, const Packet& b) { return a>=b ? pzero(a) : ptrue(a); }
+
+template<typename T>
+struct bit_and {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const {
+ return a & b;
+ }
+};
+
+template<typename T>
+struct bit_or {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const {
+ return a | b;
+ }
+};
+
+template<typename T>
+struct bit_xor {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const {
+ return a ^ b;
+ }
+};
+
+template<typename T>
+struct bit_not {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a) const {
+ return ~a;
+ }
+};
+
+// Use operators &, |, ^, ~.
+template<typename T>
+struct operator_bitwise_helper {
+ EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { return bit_and<T>()(a, b); }
+ EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return bit_or<T>()(a, b); }
+ EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { return bit_xor<T>()(a, b); }
+ EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return bit_not<T>()(a); }
+};
+
+// Apply binary operations byte-by-byte
+template<typename T>
+struct bytewise_bitwise_helper {
+ EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) {
+ return binary(a, b, bit_and<unsigned char>());
+ }
+ EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) {
+ return binary(a, b, bit_or<unsigned char>());
+ }
+ EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) {
+ return binary(a, b, bit_xor<unsigned char>());
+ }
+ EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) {
+ return unary(a,bit_not<unsigned char>());
+ }
+
+ private:
+ template<typename Op>
+ EIGEN_DEVICE_FUNC static inline T unary(const T& a, Op op) {
+ const unsigned char* a_ptr = reinterpret_cast<const unsigned char*>(&a);
+ T c;
+ unsigned char* c_ptr = reinterpret_cast<unsigned char*>(&c);
+ for (size_t i = 0; i < sizeof(T); ++i) {
+ *c_ptr++ = op(*a_ptr++);
+ }
+ return c;
+ }
+
+ template<typename Op>
+ EIGEN_DEVICE_FUNC static inline T binary(const T& a, const T& b, Op op) {
+ const unsigned char* a_ptr = reinterpret_cast<const unsigned char*>(&a);
+ const unsigned char* b_ptr = reinterpret_cast<const unsigned char*>(&b);
+ T c;
+ unsigned char* c_ptr = reinterpret_cast<unsigned char*>(&c);
+ for (size_t i = 0; i < sizeof(T); ++i) {
+ *c_ptr++ = op(*a_ptr++, *b_ptr++);
+ }
+ return c;
+ }
+};
+
+// In the general case, use byte-by-byte manipulation.
+template<typename T, typename EnableIf = void>
+struct bitwise_helper : public bytewise_bitwise_helper<T> {};
+
+// For integers or non-trivial scalars, use binary operators.
+template<typename T>
+struct bitwise_helper<T,
+ typename internal::enable_if<
+ is_scalar<T>::value && (NumTraits<T>::IsInteger || NumTraits<T>::RequireInitialization)>::type
+ > : public operator_bitwise_helper<T> {};
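A standalone demo of the byte-by-byte fallback: it clears the sign bit of a double the way bytewise_bitwise_helper::binary would, assuming little-endian IEEE-754 doubles:

#include <cstddef>
#include <cstring>

int main() {
  double x = -3.5, mask, result;
  unsigned char m[sizeof(double)];
  std::memset(m, 0xff, sizeof(m));
  m[sizeof(double) - 1] = 0x7f;  // all ones except the sign bit (little-endian)
  std::memcpy(&mask, m, sizeof(double));
  const unsigned char* a = reinterpret_cast<const unsigned char*>(&x);
  const unsigned char* b = reinterpret_cast<const unsigned char*>(&mask);
  unsigned char out[sizeof(double)];
  for (std::size_t i = 0; i < sizeof(double); ++i) out[i] = a[i] & b[i];
  std::memcpy(&result, out, sizeof(double));
  return result == 3.5 ? 0 : 1;  // bytewise AND dropped the sign
}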
/** \internal \returns the bitwise and of \a a and \a b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pand(const Packet& a, const Packet& b) { return a & b; }
+pand(const Packet& a, const Packet& b) {
+ return bitwise_helper<Packet>::bitwise_and(a, b);
+}
/** \internal \returns the bitwise or of \a a and \a b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-por(const Packet& a, const Packet& b) { return a | b; }
+por(const Packet& a, const Packet& b) {
+ return bitwise_helper<Packet>::bitwise_or(a, b);
+}
/** \internal \returns the bitwise xor of \a a and \a b */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pxor(const Packet& a, const Packet& b) { return a ^ b; }
+pxor(const Packet& a, const Packet& b) {
+ return bitwise_helper<Packet>::bitwise_xor(a, b);
+}
+
+/** \internal \returns the bitwise not of \a a */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pnot(const Packet& a) {
+ return bitwise_helper<Packet>::bitwise_not(a);
+}
+
+/** \internal \returns the bitwise and of \a a and not \a b */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pandnot(const Packet& a, const Packet& b) { return pand(a, pnot(b)); }
+
+// In the general case, use bitwise select.
+template<typename Packet, typename EnableIf = void>
+struct pselect_impl {
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) {
+ return por(pand(a,mask),pandnot(b,mask));
+ }
+};
+
+// For scalars, use ternary select.
+template<typename Packet>
+struct pselect_impl<Packet,
+ typename internal::enable_if<is_scalar<Packet>::value>::type > {
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) {
+ return numext::equal_strict(mask, Packet(0)) ? b : a;
+ }
+};
+
+/** \internal \returns \a a or \a b for each field in the packet according to \a mask */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pselect(const Packet& mask, const Packet& a, const Packet& b) {
+ return pselect_impl<Packet>::run(mask, a, b);
+}
+
+template<> EIGEN_DEVICE_FUNC inline bool pselect<bool>(
+ const bool& cond, const bool& a, const bool& b) {
+ return cond ? a : b;
+}
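A one-lane demo of the generic blend used by pselect_impl, select = (a & mask) | (b & ~mask):

#include <cstdint>

int main() {
  std::uint32_t a = 0xDEADBEEFu, b = 0x12345678u;
  std::uint32_t mask = 0xFFFFFFFFu;              // all-ones: the "true" lane
  std::uint32_t sel = (a & mask) | (b & ~mask);  // picks a; an all-zero mask picks b
  return sel == a ? 0 : 1;
}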
+
+/** \internal \returns the min or max of \a a and \a b (coeff-wise).
+    If either \a a or \a b is NaN, the result is implementation defined. */
+template<int NaNPropagation>
+struct pminmax_impl {
+ template <typename Packet, typename Op>
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
+ return op(a,b);
+ }
+};
+
+/** \internal \returns the min or max of \a a and \a b (coeff-wise).
+    If either \a a or \a b is NaN, NaN is returned. */
+template<>
+struct pminmax_impl<PropagateNaN> {
+ template <typename Packet, typename Op>
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
+ Packet not_nan_mask_a = pcmp_eq(a, a);
+ Packet not_nan_mask_b = pcmp_eq(b, b);
+ return pselect(not_nan_mask_a,
+ pselect(not_nan_mask_b, op(a, b), b),
+ a);
+ }
+};
+
+/** \internal \returns the min or max of \a a and \a b (coeff-wise).
+    If both \a a and \a b are NaN, NaN is returned.
+    Equivalent to std::fmin(a, b) or std::fmax(a, b). */
+template<>
+struct pminmax_impl<PropagateNumbers> {
+ template <typename Packet, typename Op>
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
+ Packet not_nan_mask_a = pcmp_eq(a, a);
+ Packet not_nan_mask_b = pcmp_eq(b, b);
+ return pselect(not_nan_mask_a,
+ pselect(not_nan_mask_b, op(a, b), a),
+ b);
+ }
+};
+
+
+#ifndef SYCL_DEVICE_ONLY
+#define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) Func
+#else
+#define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) \
+[](const Type& a, const Type& b) { \
+ return Func(a, b);}
+#endif
+
+/** \internal \returns the min of \a a and \a b (coeff-wise).
+    If \a a or \a b is NaN, the return value is implementation defined. */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pmin(const Packet& a, const Packet& b) { return numext::mini(a,b); }
+
+/** \internal \returns the min of \a a and \a b (coeff-wise).
+ NaNPropagation determines the NaN propagation semantics. */
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) {
+ return pminmax_impl<NaNPropagation>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin<Packet>)));
+}
+
+/** \internal \returns the max of \a a and \a b (coeff-wise).
+    If \a a or \a b is NaN, the return value is implementation defined. */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pmax(const Packet& a, const Packet& b) { return numext::maxi(a, b); }
+
+/** \internal \returns the max of \a a and \a b (coeff-wise).
+ NaNPropagation determines the NaN propagation semantics. */
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) {
+ return pminmax_impl<NaNPropagation>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet,(pmax<Packet>)));
+}
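A scalar illustration of the two propagation policies; std::fmin matches PropagateNumbers, and pcmp_eq(a, a) above is the packet form of the (a == a) NaN test:

#include <cmath>

int main() {
  double qnan = std::nan("");
  double numbers = std::fmin(qnan, 2.0);  // 2.0: the number wins over NaN
  // PropagateNaN is the reverse policy: any NaN input yields NaN.
  bool nan_detected = !(qnan == qnan);    // true: NaN never compares equal to itself
  return (numbers == 2.0 && nan_detected) ? 0 : 1;
}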
+
+/** \internal \returns the absolute value of \a a */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pabs(const Packet& a) { return numext::abs(a); }
+template<> EIGEN_DEVICE_FUNC inline unsigned int
+pabs(const unsigned int& a) { return a; }
+template<> EIGEN_DEVICE_FUNC inline unsigned long
+pabs(const unsigned long& a) { return a; }
+template<> EIGEN_DEVICE_FUNC inline unsigned long long
+pabs(const unsigned long long& a) { return a; }
+
+/** \internal \returns the coeff-wise add/sub of \a a and \a b: even lanes contain a+b, odd lanes a-b */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+paddsub(const Packet& a, const Packet& b) {
+ return pselect(peven_mask(a), padd(a, b), psub(a, b));
+ }
+
+/** \internal \returns the phase angle of \a a */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+parg(const Packet& a) { using numext::arg; return arg(a); }
+
+
+/** \internal \returns \a a arithmetically shifted by N bits to the right */
+template<int N> EIGEN_DEVICE_FUNC inline int
+parithmetic_shift_right(const int& a) { return a >> N; }
+template<int N> EIGEN_DEVICE_FUNC inline long int
+parithmetic_shift_right(const long int& a) { return a >> N; }
-/** \internal \returns the bitwise andnot of \a a and \a b */
+/** \internal \returns \a a logically shifted by N bits to the right */
+template<int N> EIGEN_DEVICE_FUNC inline int
+plogical_shift_right(const int& a) { return static_cast<int>(static_cast<unsigned int>(a) >> N); }
+template<int N> EIGEN_DEVICE_FUNC inline long int
+plogical_shift_right(const long int& a) { return static_cast<long>(static_cast<unsigned long>(a) >> N); }
+
+/** \internal \returns \a a shifted by N bits to the left */
+template<int N> EIGEN_DEVICE_FUNC inline int
+plogical_shift_left(const int& a) { return a << N; }
+template<int N> EIGEN_DEVICE_FUNC inline long int
+plogical_shift_left(const long int& a) { return a << N; }
+
+/** \internal \returns the significand and exponent of the underlying floating point numbers
+ * See https://en.cppreference.com/w/cpp/numeric/math/frexp
+ */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline Packet pfrexp(const Packet& a, Packet& exponent) {
+ int exp;
+ EIGEN_USING_STD(frexp);
+ Packet result = static_cast<Packet>(frexp(a, &exp));
+ exponent = static_cast<Packet>(exp);
+ return result;
+}
+
+/** \internal \returns a * 2^((int)exponent)
+ * See https://en.cppreference.com/w/cpp/numeric/math/ldexp
+ */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pandnot(const Packet& a, const Packet& b) { return a & (!b); }
+pldexp(const Packet &a, const Packet &exponent) {
+ EIGEN_USING_STD(ldexp)
+ return static_cast<Packet>(ldexp(a, static_cast<int>(exponent)));
+}
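The generic paths reduce to the standard frexp/ldexp pair they document; a worked round trip:

#include <cmath>

int main() {
  int e = 0;
  double m = std::frexp(48.0, &e);  // m == 0.75, e == 6, since 48 = 0.75 * 2^6
  double back = std::ldexp(m, e);   // back == 48.0
  return (m == 0.75 && e == 6 && back == 48.0) ? 0 : 1;
}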
+
+/** \internal \returns the absolute difference of \a a and \a b (coeff-wise) */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pabsdiff(const Packet& a, const Packet& b) { return pselect(pcmp_lt(a, b), psub(b, a), psub(a, b)); }
/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
@@ -218,10 +602,22 @@ pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
+/** \internal \returns a packet version of \a *from, (un-aligned masked load)
+ * There is no generic implementation. We only have implementations for specialized
+ * cases. The generic case must not be called.
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline
+typename enable_if<unpacket_traits<Packet>::masked_load_available, Packet>::type
+ploadu(const typename unpacket_traits<Packet>::type* from, typename unpacket_traits<Packet>::mask_t umask);
+
/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
+/** \internal \returns a packet with constant coefficients set from bits */
+template<typename Packet,typename BitsType> EIGEN_DEVICE_FUNC inline Packet
+pset1frombits(BitsType a);
+
/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(*a); }
@@ -238,7 +634,7 @@ ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
* For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and
* replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
* Currently, this function is only used in matrix products.
- * For packet-size smaller or equal to 4, this function is equivalent to pload1
+ * For packet-size smaller or equal to 4, this function is equivalent to pload1
*/
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
ploadquad(const typename unpacket_traits<Packet>::type* from)
@@ -282,6 +678,20 @@ inline void pbroadcast2(const typename unpacket_traits<Packet>::type *a,
template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
plset(const typename unpacket_traits<Packet>::type& a) { return a; }
+/** \internal \returns a packet with alternating lanes (x, 0, x, 0, ...),
+     where x is the value whose bits are all ones, i.e. the even lanes are all-ones masks. */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+peven_mask(const Packet& /*a*/) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ const size_t n = unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n];
+ for(size_t i = 0; i < n; ++i) {
+ memset(elements+i, ((i & 1) == 0 ? 0xff : 0), sizeof(Scalar));
+ }
+ return ploadu<Packet>(elements);
+}
+
+
/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from)
{ (*to) = from; }
@@ -290,6 +700,15 @@ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from)
{ (*to) = from; }
+/** \internal copy the packet \a from to \a *to, (un-aligned store with a mask)
+ * There is no generic implementation. We only have implementations for specialized
+ * cases. The generic case must not be called.
+ */
+template<typename Scalar, typename Packet>
+EIGEN_DEVICE_FUNC inline
+typename enable_if<unpacket_traits<Packet>::masked_store_available, void>::type
+pstoreu(Scalar* to, const Packet& from, typename unpacket_traits<Packet>::mask_t umask);
+
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/)
{ return ploadu<Packet>(from); }
@@ -299,8 +718,10 @@ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu
/** \internal tries to do cache prefetching of \a addr */
template<typename Scalar> EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr)
{
-#ifdef EIGEN_CUDA_ARCH
-#if defined(__LP64__)
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+ // do nothing
+#elif defined(EIGEN_CUDA_ARCH)
+#if defined(__LP64__) || EIGEN_OS_WIN64
// 64-bit pointer operand constraint for inlined asm
asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
#else
@@ -312,39 +733,6 @@ template<typename Scalar> EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* a
#endif
}
-/** \internal \returns the first element of a packet */
-template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type pfirst(const Packet& a)
-{ return a; }
-
-/** \internal \returns a packet where the element i contains the sum of the packet of \a vec[i] */
-template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-preduxp(const Packet* vecs) { return vecs[0]; }
-
-/** \internal \returns the sum of the elements of \a a*/
-template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a)
-{ return a; }
-
-/** \internal \returns the sum of the elements of \a a by block of 4 elements.
- * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}
- * For packet-size smaller or equal to 4, this boils down to a noop.
- */
-template<typename Packet> EIGEN_DEVICE_FUNC inline
-typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type
-predux_downto4(const Packet& a)
-{ return a; }
-
-/** \internal \returns the product of the elements of \a a*/
-template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)
-{ return a; }
-
-/** \internal \returns the min of the elements of \a a*/
-template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a)
-{ return a; }
-
-/** \internal \returns the max of the elements of \a a*/
-template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a)
-{ return a; }
-
/** \internal \returns the reversed elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a)
{ return a; }
@@ -352,10 +740,7 @@ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet preverse(const Packet&
/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a)
{
- // FIXME: uncomment the following in case we drop the internal imag and real functions.
-// using std::imag;
-// using std::real;
- return Packet(imag(a),real(a));
+ return Packet(numext::imag(a),numext::real(a));
}
/**************************
@@ -364,43 +749,43 @@ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet
/** \internal \returns the sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psin(const Packet& a) { using std::sin; return sin(a); }
+Packet psin(const Packet& a) { EIGEN_USING_STD(sin); return sin(a); }
/** \internal \returns the cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pcos(const Packet& a) { using std::cos; return cos(a); }
+Packet pcos(const Packet& a) { EIGEN_USING_STD(cos); return cos(a); }
/** \internal \returns the tan of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet ptan(const Packet& a) { using std::tan; return tan(a); }
+Packet ptan(const Packet& a) { EIGEN_USING_STD(tan); return tan(a); }
/** \internal \returns the arc sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pasin(const Packet& a) { using std::asin; return asin(a); }
+Packet pasin(const Packet& a) { EIGEN_USING_STD(asin); return asin(a); }
/** \internal \returns the arc cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pacos(const Packet& a) { using std::acos; return acos(a); }
+Packet pacos(const Packet& a) { EIGEN_USING_STD(acos); return acos(a); }
/** \internal \returns the arc tangent of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet patan(const Packet& a) { using std::atan; return atan(a); }
+Packet patan(const Packet& a) { EIGEN_USING_STD(atan); return atan(a); }
/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psinh(const Packet& a) { using std::sinh; return sinh(a); }
+Packet psinh(const Packet& a) { EIGEN_USING_STD(sinh); return sinh(a); }
/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pcosh(const Packet& a) { using std::cosh; return cosh(a); }
+Packet pcosh(const Packet& a) { EIGEN_USING_STD(cosh); return cosh(a); }
/** \internal \returns the hyperbolic tan of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet ptanh(const Packet& a) { using std::tanh; return tanh(a); }
+Packet ptanh(const Packet& a) { EIGEN_USING_STD(tanh); return tanh(a); }
/** \internal \returns the exp of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pexp(const Packet& a) { using std::exp; return exp(a); }
+Packet pexp(const Packet& a) { EIGEN_USING_STD(exp); return exp(a); }
/** \internal \returns the expm1 of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
@@ -408,7 +793,7 @@ Packet pexpm1(const Packet& a) { return numext::expm1(a); }
/** \internal \returns the log of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet plog(const Packet& a) { using std::log; return log(a); }
+Packet plog(const Packet& a) { EIGEN_USING_STD(log); return log(a); }
/** \internal \returns the log1p of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
@@ -416,16 +801,24 @@ Packet plog1p(const Packet& a) { return numext::log1p(a); }
/** \internal \returns the log10 of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet plog10(const Packet& a) { using std::log10; return log10(a); }
+Packet plog10(const Packet& a) { EIGEN_USING_STD(log10); return log10(a); }
+
+/** \internal \returns the log2 of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet plog2(const Packet& a) {
+ typedef typename internal::unpacket_traits<Packet>::type Scalar;
+ return pmul(pset1<Packet>(Scalar(EIGEN_LOG2E)), plog(a));
+}
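A scalar check of the identity plog2 relies on, log2(x) = log2(e) * ln(x); the literal below stands in for EIGEN_LOG2E:

#include <cmath>

int main() {
  double x = 8.0;
  double v = 1.4426950408889634 * std::log(x);  // log2(e) * ln(8) ≈ 3.0
  return (v > 2.9999 && v < 3.0001) ? 0 : 1;
}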
/** \internal \returns the square-root of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }
+Packet psqrt(const Packet& a) { return numext::sqrt(a); }
/** \internal \returns the reciprocal square-root of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet prsqrt(const Packet& a) {
- return pdiv(pset1<Packet>(1), psqrt(a));
+ typedef typename internal::unpacket_traits<Packet>::type Scalar;
+ return pdiv(pset1<Packet>(Scalar(1)), psqrt(a));
}
/** \internal \returns the rounded value of \a a (coeff-wise) */
@@ -436,15 +829,121 @@ Packet pround(const Packet& a) { using numext::round; return round(a); }
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pfloor(const Packet& a) { using numext::floor; return floor(a); }
+/** \internal \returns the rounded value of \a a (coeff-wise) with current
+ * rounding mode */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet print(const Packet& a) { using numext::rint; return rint(a); }
+
/** \internal \returns the ceil of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); }
+/** \internal \returns the first element of a packet */
+template<typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type
+pfirst(const Packet& a)
+{ return a; }
+
+/** \internal \returns the sum of the elements of the upper and lower halves of \a a if the packet size is larger than 4.
+ * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}.
+ * For packet sizes smaller than or equal to 4, this boils down to a no-op.
+ */
+template<typename Packet>
+EIGEN_DEVICE_FUNC inline typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type
+predux_half_dowto4(const Packet& a)
+{ return a; }
+
+// Slow generic implementation of Packet reduction.
+template <typename Packet, typename Op>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type
+predux_helper(const Packet& a, Op op) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ const size_t n = unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n];
+ pstoreu<Scalar>(elements, a);
+ for(size_t k = n / 2; k > 0; k /= 2) {
+ for(size_t i = 0; i < k; ++i) {
+ elements[i] = op(elements[i], elements[i + k]);
+ }
+ }
+ return elements[0];
+}
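
predux_helper spills the packet into a scalar array and folds it with a binary tree: each pass combines element i with element i+k while halving k, which assumes a power-of-two packet size. The same loop on a plain array, as a self-contained sketch with no Eigen types:

#include <cstddef>
#include <cstdio>
#include <functional>

// Tree reduction mirroring predux_helper's halving loop.
// Destructive: reuses the array as scratch, like the aligned buffer above.
template <typename T, std::size_t N, typename Op>
T tree_reduce(T (&elements)[N], Op op) {
  for (std::size_t k = N / 2; k > 0; k /= 2)
    for (std::size_t i = 0; i < k; ++i)
      elements[i] = op(elements[i], elements[i + k]);
  return elements[0];
}

int main() {
  float v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  std::printf("sum = %g\n", tree_reduce(v, std::plus<float>()));  // prints 36
  return 0;
}
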
+
+/** \internal \returns the sum of the elements of \a a*/
+template<typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type
+predux(const Packet& a)
+{
+ return a;
+}
+
+/** \internal \returns the product of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(
+ const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmul<Scalar>)));
+}
+
+/** \internal \returns the min of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(
+ const Packet &a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<PropagateFast, Scalar>)));
+}
+
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(
+ const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<NaNPropagation, Scalar>)));
+}
+
+/** \internal \returns the min of the elements of \a a */
+template <typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(
+ const Packet &a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<PropagateFast, Scalar>)));
+}
+
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(
+ const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<NaNPropagation, Scalar>)));
+}
+
+#undef EIGEN_BINARY_OP_NAN_PROPAGATION
+
+/** \internal \returns true if all coeffs of \a a mean "true"
+ * It is supposed to be called on values returned by pcmp_*.
+ */
+// not needed yet
+// template<typename Packet> EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a)
+// { return bool(a); }
+
+/** \internal \returns true if any coeff of \a a means "true"
+ * It is supposed to be called on values returned by pcmp_*.
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline bool predux_any(const Packet& a)
+{
+ // Dirty but generic implementation where "true" is assumed to be non-zero and all lanes the same.
+ // It is expected that "true" is either:
+ // - Scalar(1)
+ // - bits full of ones (NaN for floats),
+ // - or the first bit set to 1 (1 for ints, smallest denormal for floats).
+ // For all these cases, taking the sum is just fine, and this boils down to a no-op for scalars.
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ return numext::not_equal_strict(predux(a), Scalar(0));
+}
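
Since the generic predux_any only needs to distinguish "all lanes zero" from "some lane non-zero", summing the lanes of a comparison mask and testing against zero suffices. A scalar sketch of that reduction over an explicit mask array (hypothetical helper, not Eigen API):

#include <cstdio>

// Hypothetical stand-in for a comparison result: one mask value per lane,
// where "true" is any non-zero pattern and "false" is exactly zero.
bool any_lane_set(const int* mask, int lanes) {
  int sum = 0;
  for (int i = 0; i < lanes; ++i) sum += mask[i];  // predux over the mask
  return sum != 0;                                 // the != Scalar(0) test
}

int main() {
  int all_false[4] = {0, 0, 0, 0};
  int one_true[4]  = {0, 0, 1, 0};
  std::printf("%d %d\n", any_lane_set(all_false, 4), any_lane_set(one_true, 4));
  return 0;
}
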
+
/***************************************************************************
* The following functions might not have to be overwritten for vectorized types
***************************************************************************/
-/** \internal copy a packet with constant coeficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
+/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type)
template<typename Packet>
inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a)
@@ -492,47 +991,18 @@ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_t
return ploadt<Packet, LoadMode>(from);
}
-/** \internal default implementation of palign() allowing partial specialization */
-template<int Offset,typename PacketType>
-struct palign_impl
-{
- // by default data are aligned, so there is nothing to be done :)
- static inline void run(PacketType&, const PacketType&) {}
-};
-
-/** \internal update \a first using the concatenation of the packet_size minus \a Offset last elements
- * of \a first and \a Offset first elements of \a second.
- *
- * This function is currently only used to optimize matrix-vector products on unligned matrices.
- * It takes 2 packets that represent a contiguous memory array, and returns a packet starting
- * at the position \a Offset. For instance, for packets of 4 elements, we have:
- * Input:
- * - first = {f0,f1,f2,f3}
- * - second = {s0,s1,s2,s3}
- * Output:
- * - if Offset==0 then {f0,f1,f2,f3}
- * - if Offset==1 then {f1,f2,f3,s0}
- * - if Offset==2 then {f2,f3,s0,s1}
- * - if Offset==3 then {f3,s0,s1,s3}
- */
-template<int Offset,typename PacketType>
-inline void palign(PacketType& first, const PacketType& second)
-{
- palign_impl<Offset,PacketType>::run(first,second);
-}
-
/***************************************************************************
* Fast complex products (GCC generates a function call which is very slow)
***************************************************************************/
// Eigen+CUDA does not support complexes.
-#ifndef EIGEN_CUDACC
+#if !defined(EIGEN_GPUCC)
template<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b)
-{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
+{ return std::complex<float>(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); }
template<> inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b)
-{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
+{ return std::complex<double>(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); }
#endif
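
The rewritten specializations only swap the ADL-found free functions real()/imag() for the member accessors; the arithmetic is still the textbook complex product (ac - bd, ad + bc). A standalone check against std::complex multiplication:

#include <complex>
#include <cstdio>

// Same formula as the pmul specializations above, written out for clarity.
std::complex<float> manual_mul(const std::complex<float>& a,
                               const std::complex<float>& b) {
  return std::complex<float>(a.real() * b.real() - a.imag() * b.imag(),
                             a.imag() * b.real() + a.real() * b.imag());
}

int main() {
  std::complex<float> a(1.5f, -2.0f), b(0.25f, 3.0f);
  std::complex<float> m = manual_mul(a, b), s = a * b;
  std::printf("manual=(%g,%g)  operator*=(%g,%g)\n",
              m.real(), m.imag(), s.real(), s.imag());
  return 0;
}
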
@@ -563,34 +1033,6 @@ pblend(const Selector<unpacket_traits<Packet>::size>& ifPacket, const Packet& th
return ifPacket.select[0] ? thenPacket : elsePacket;
}
-/** \internal \returns \a a with the first coefficient replaced by the scalar b */
-template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pinsertfirst(const Packet& a, typename unpacket_traits<Packet>::type b)
-{
- // Default implementation based on pblend.
- // It must be specialized for higher performance.
- Selector<unpacket_traits<Packet>::size> mask;
- mask.select[0] = true;
- // This for loop should be optimized away by the compiler.
- for(Index i=1; i<unpacket_traits<Packet>::size; ++i)
- mask.select[i] = false;
- return pblend(mask, pset1<Packet>(b), a);
-}
-
-/** \internal \returns \a a with the last coefficient replaced by the scalar b */
-template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pinsertlast(const Packet& a, typename unpacket_traits<Packet>::type b)
-{
- // Default implementation based on pblend.
- // It must be specialized for higher performance.
- Selector<unpacket_traits<Packet>::size> mask;
- // This for loop should be optimized away by the compiler.
- for(Index i=0; i<unpacket_traits<Packet>::size-1; ++i)
- mask.select[i] = false;
- mask.select[unpacket_traits<Packet>::size-1] = true;
- return pblend(mask, pset1<Packet>(b), a);
-}
-
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/GlobalFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/GlobalFunctions.h
index 50406400b..629af94b9 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/GlobalFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/GlobalFunctions.h
@@ -66,22 +66,31 @@ namespace Eigen
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\sa ArrayBase::sinh)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\sa ArrayBase::cosh)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\sa ArrayBase::tanh)
+#if EIGEN_HAS_CXX11_MATH
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh,scalar_asinh_op,inverse hyperbolic sine,\sa ArrayBase::asinh)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh,scalar_acosh_op,inverse hyperbolic cosine,\sa ArrayBase::acosh)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh,scalar_atanh_op,inverse hyperbolic tangent,\sa ArrayBase::atanh)
+#endif
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic,scalar_logistic_op,logistic function,\sa ArrayBase::logistic)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\sa ArrayBase::lgamma)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\sa ArrayBase::digamma)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf,scalar_erf_op,error function,\sa ArrayBase::erf)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc,scalar_erfc_op,complement error function,\sa ArrayBase::erfc)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ndtri,scalar_ndtri_op,inverse normal distribution function,\sa ArrayBase::ndtri)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op,exponential,\sa ArrayBase::exp)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1,scalar_expm1_op,exponential of a value minus 1,\sa ArrayBase::expm1)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op,natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p,scalar_log1p_op,natural logarithm of 1 plus the value,\sa ArrayBase::log1p)
- EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10,scalar_log10_op,base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10,scalar_log10_op,base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log10)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log2,scalar_log2_op,base 2 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log2)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op,absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2,scalar_abs2_op,squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2)
- EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg,scalar_arg_op,complex argument,\sa ArrayBase::arg)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg,scalar_arg_op,complex argument,\sa ArrayBase::arg DOXCOMMA MatrixBase::cwiseArg)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op,square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt,scalar_rsqrt_op,reciprocal square root,\sa ArrayBase::rsqrt)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square,scalar_square_op,square (power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube,scalar_cube_op,cube (power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rint,scalar_rint_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round,scalar_round_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(floor,scalar_floor_op,nearest integer not greater than the given value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ceil,scalar_ceil_op,nearest integer not less than the given value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil)
@@ -89,7 +98,7 @@ namespace Eigen
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isinf,scalar_isinf_op,infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite,scalar_isfinite_op,finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite)
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign,scalar_sign_op,sign (or 0),\sa ArrayBase::sign)
-
+
/** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent.
*
* \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression (\c Derived::Scalar).
@@ -124,21 +133,21 @@ namespace Eigen
*
* Example: \include Cwise_array_power_array.cpp
* Output: \verbinclude Cwise_array_power_array.out
- *
+ *
* \sa ArrayBase::pow()
*
* \relates ArrayBase
*/
template<typename Derived,typename ExponentDerived>
inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived>
- pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents)
+ pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents)
{
return Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived>(
x.derived(),
exponents.derived()
);
}
-
+
/** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents.
*
* This function computes the coefficient-wise power between a scalar and an array of exponents.
@@ -147,7 +156,7 @@ namespace Eigen
*
* Example: \include Cwise_scalar_power_array.cpp
* Output: \verbinclude Cwise_scalar_power_array.out
- *
+ *
* \sa ArrayBase::pow()
*
* \relates ArrayBase
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/IO.h b/examples/ThirdPartyLibs/Eigen/src/Core/IO.h
index da7fd6cce..e81c31521 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/IO.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/IO.h
@@ -41,6 +41,7 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
* - \b rowSuffix string printed at the end of each row
* - \b matPrefix string printed at the beginning of the matrix
* - \b matSuffix string printed at the end of the matrix
+ * - \b fill character printed to fill the empty space in aligned columns
*
* Example: \include IOFormat.cpp
* Output: \verbinclude IOFormat.out
@@ -53,9 +54,9 @@ struct IOFormat
IOFormat(int _precision = StreamPrecision, int _flags = 0,
const std::string& _coeffSeparator = " ",
const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
- const std::string& _matPrefix="", const std::string& _matSuffix="")
+ const std::string& _matPrefix="", const std::string& _matSuffix="", const char _fill=' ')
: matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
- rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
+ rowSpacer(""), coeffSeparator(_coeffSeparator), fill(_fill), precision(_precision), flags(_flags)
{
// TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline
// don't add rowSpacer if columns are not to be aligned
@@ -71,6 +72,7 @@ struct IOFormat
std::string matPrefix, matSuffix;
std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer;
std::string coeffSeparator;
+ char fill;
int precision;
int flags;
};
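
With the new trailing constructor argument, callers can choose the padding character used when columns are width-aligned. A usage sketch against the extended signature introduced here (output depends on the coefficients, of course):

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix2d m;
  m << 1, 1000, 20, 3;
  // Last argument is the new 'fill' character; '.' makes the padding visible.
  Eigen::IOFormat padded(Eigen::StreamPrecision, 0, ", ", "\n",
                         "[", "]", "", "", '.');
  std::cout << m.format(padded) << std::endl;
  return 0;
}
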
@@ -128,6 +130,9 @@ struct significant_decimals_impl
template<typename Derived>
std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt)
{
+ using internal::is_same;
+ using internal::conditional;
+
if(_m.size() == 0)
{
s << fmt.matPrefix << fmt.matSuffix;
@@ -136,6 +141,22 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
typename Derived::Nested m = _m;
typedef typename Derived::Scalar Scalar;
+ typedef typename
+ conditional<
+ is_same<Scalar, char>::value ||
+ is_same<Scalar, unsigned char>::value ||
+ is_same<Scalar, numext::int8_t>::value ||
+ is_same<Scalar, numext::uint8_t>::value,
+ int,
+ typename conditional<
+ is_same<Scalar, std::complex<char> >::value ||
+ is_same<Scalar, std::complex<unsigned char> >::value ||
+ is_same<Scalar, std::complex<numext::int8_t> >::value ||
+ is_same<Scalar, std::complex<numext::uint8_t> >::value,
+ std::complex<int>,
+ const Scalar&
+ >::type
+ >::type PrintType;
Index width = 0;
@@ -172,23 +193,31 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
{
std::stringstream sstr;
sstr.copyfmt(s);
- sstr << m.coeff(i,j);
+ sstr << static_cast<PrintType>(m.coeff(i,j));
width = std::max<Index>(width, Index(sstr.str().length()));
}
}
+ std::streamsize old_width = s.width();
+ char old_fill_character = s.fill();
s << fmt.matPrefix;
for(Index i = 0; i < m.rows(); ++i)
{
if (i)
s << fmt.rowSpacer;
s << fmt.rowPrefix;
- if(width) s.width(width);
- s << m.coeff(i, 0);
+ if(width) {
+ s.fill(fmt.fill);
+ s.width(width);
+ }
+ s << static_cast<PrintType>(m.coeff(i, 0));
for(Index j = 1; j < m.cols(); ++j)
{
s << fmt.coeffSeparator;
- if (width) s.width(width);
- s << m.coeff(i, j);
+ if(width) {
+ s.fill(fmt.fill);
+ s.width(width);
+ }
+ s << static_cast<PrintType>(m.coeff(i, j));
}
s << fmt.rowSuffix;
if( i < m.rows() - 1)
@@ -196,6 +225,10 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
}
s << fmt.matSuffix;
if(explicit_precision) s.precision(old_precision);
+ if(width) {
+ s.fill(old_fill_character);
+ s.width(old_width);
+ }
return s;
}
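
The PrintType detour exists because streaming 8-bit integer scalars through std::ostream prints them as characters rather than numbers on implementations where int8_t aliases a character type (effectively all of them); widening to int, or std::complex<int>, restores numeric output. A minimal demonstration of the underlying stream behavior:

#include <cstdint>
#include <iostream>

int main() {
  std::int8_t v = 65;
  std::cout << v << "\n";                    // prints 'A' (character output)
  std::cout << static_cast<int>(v) << "\n";  // prints 65 (numeric output)
  return 0;
}
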
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/IndexedView.h b/examples/ThirdPartyLibs/Eigen/src/Core/IndexedView.h
index 8c57a277c..08476251d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/IndexedView.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/IndexedView.h
@@ -21,8 +21,8 @@ struct traits<IndexedView<XprType, RowIndices, ColIndices> >
enum {
RowsAtCompileTime = int(array_size<RowIndices>::value),
ColsAtCompileTime = int(array_size<ColIndices>::value),
- MaxRowsAtCompileTime = RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : int(traits<XprType>::MaxRowsAtCompileTime),
- MaxColsAtCompileTime = ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : int(traits<XprType>::MaxColsAtCompileTime),
+ MaxRowsAtCompileTime = RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : Dynamic,
+ MaxColsAtCompileTime = ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : Dynamic,
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
@@ -54,7 +54,8 @@ struct traits<IndexedView<XprType, RowIndices, ColIndices> >
DirectAccessMask = (int(InnerIncr)!=UndefinedIncr && int(OuterIncr)!=UndefinedIncr && InnerIncr>=0 && OuterIncr>=0) ? DirectAccessBit : 0,
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
- Flags = (traits<XprType>::Flags & (HereditaryBits | DirectAccessMask)) | FlagsLvalueBit | FlagsRowMajorBit
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+ Flags = (traits<XprType>::Flags & (HereditaryBits | DirectAccessMask )) | FlagsLvalueBit | FlagsRowMajorBit | FlagsLinearAccessBit
};
typedef Block<XprType,RowsAtCompileTime,ColsAtCompileTime,IsInnerPannel> BlockType;
@@ -132,7 +133,7 @@ public:
/** \returns the nested expression */
typename internal::remove_reference<XprType>::type&
- nestedExpression() { return m_xpr.const_cast_derived(); }
+ nestedExpression() { return m_xpr; }
/** \returns a const reference to the object storing/generating the row indices */
const RowIndices& rowIndices() const { return m_rowIndices; }
@@ -168,7 +169,11 @@ struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost /* TODO + cost of row/col index */,
- Flags = (evaluator<ArgType>::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)),
+ FlagsLinearAccessBit = (traits<XprType>::RowsAtCompileTime == 1 || traits<XprType>::ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+
+ FlagsRowMajorBit = traits<XprType>::FlagsRowMajorBit,
+
+ Flags = (evaluator<ArgType>::Flags & (HereditaryBits & ~RowMajorBit /*| LinearAccessBit | DirectAccessBit*/)) | FlagsLinearAccessBit | FlagsRowMajorBit,
Alignment = 0
};
@@ -193,6 +198,31 @@ struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
+ Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
+ Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
+ return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar& coeffRef(Index index) const
+ {
+ Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
+ Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
+ return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const CoeffReturnType coeff(Index index) const
+ {
+ Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
+ Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
+ return m_argImpl.coeff( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
+ }
+
protected:
evaluator<ArgType> m_argImpl;
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Inverse.h b/examples/ThirdPartyLibs/Eigen/src/Core/Inverse.h
index b76f0439d..c514438c4 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Inverse.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Inverse.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2014-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,7 +10,7 @@
#ifndef EIGEN_INVERSE_H
#define EIGEN_INVERSE_H
-namespace Eigen {
+namespace Eigen {
template<typename XprType,typename StorageKind> class InverseImpl;
@@ -44,19 +44,18 @@ class Inverse : public InverseImpl<XprType,typename internal::traits<XprType>::S
{
public:
typedef typename XprType::StorageIndex StorageIndex;
- typedef typename XprType::PlainObject PlainObject;
typedef typename XprType::Scalar Scalar;
typedef typename internal::ref_selector<XprType>::type XprTypeNested;
typedef typename internal::remove_all<XprTypeNested>::type XprTypeNestedCleaned;
typedef typename internal::ref_selector<Inverse>::type Nested;
typedef typename internal::remove_all<XprType>::type NestedExpression;
-
+
explicit EIGEN_DEVICE_FUNC Inverse(const XprType &xpr)
: m_xpr(xpr)
{}
- EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.rows(); }
EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }
@@ -82,7 +81,7 @@ namespace internal {
/** \internal
* \brief Default evaluator for Inverse expression.
- *
+ *
* This default evaluator for Inverse expression simply evaluate the inverse into a temporary
* by a call to internal::call_assignment_no_alias.
* Therefore, inverse implementers only have to specialize Assignment<Dst,Inverse<...>, ...> for
@@ -97,7 +96,7 @@ struct unary_evaluator<Inverse<ArgType> >
typedef Inverse<ArgType> InverseType;
typedef typename InverseType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
-
+
enum { Flags = Base::Flags | EvalBeforeNestingBit };
unary_evaluator(const InverseType& inv_xpr)
@@ -106,11 +105,11 @@ struct unary_evaluator<Inverse<ArgType> >
::new (static_cast<Base*>(this)) Base(m_result);
internal::call_assignment_no_alias(m_result, inv_xpr);
}
-
+
protected:
PlainObject m_result;
};
-
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Map.h b/examples/ThirdPartyLibs/Eigen/src/Core/Map.h
index c437f1a92..218cc157f 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Map.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Map.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_MAP_H
#define EIGEN_MAP_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename PlainObjectType, int MapOptions, typename StrideType>
@@ -47,7 +47,7 @@ private:
* \brief A matrix or vector expression mapping an existing array of data.
*
* \tparam PlainObjectType the equivalent matrix type of the mapped data
- * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned.
+ * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned.
* The default is \c #Unaligned.
* \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
* of an ordinary, contiguous array. This can be overridden by specifying strides.
@@ -104,13 +104,13 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
EIGEN_DEVICE_FUNC
inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index innerStride() const
{
return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index outerStride() const
{
return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/MapBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/MapBase.h
index 020f939ad..d856447f0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/MapBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/MapBase.h
@@ -15,7 +15,7 @@
EIGEN_STATIC_ASSERT((int(internal::evaluator<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
-namespace Eigen {
+namespace Eigen {
/** \ingroup Core_Module
*
@@ -43,6 +43,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ InnerStrideAtCompileTime = internal::traits<Derived>::InnerStrideAtCompileTime,
SizeAtCompileTime = Base::SizeAtCompileTime
};
@@ -86,9 +87,11 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
typedef typename Base::CoeffReturnType CoeffReturnType;
/** \copydoc DenseBase::rows() */
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_rows.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_rows.value(); }
/** \copydoc DenseBase::cols() */
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_cols.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_cols.value(); }
/** Returns a pointer to the first coefficient of the matrix or vector.
*
@@ -181,14 +184,19 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
#endif
protected:
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
template<typename T>
EIGEN_DEVICE_FUNC
void checkSanity(typename internal::enable_if<(internal::traits<T>::Alignment>0),void*>::type = 0) const
{
#if EIGEN_MAX_ALIGN_BYTES>0
+ // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible value:
+ const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime);
+ EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride);
eigen_assert(( ((internal::UIntPtr(m_data) % internal::traits<Derived>::Alignment) == 0)
- || (cols() * rows() * innerStride() * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && "data is not aligned");
+ || (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && "data is not aligned");
#endif
}
@@ -290,6 +298,9 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
// In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base,
// see bugs 821 and 920.
using ReadOnlyMapBase::Base::operator=;
+ protected:
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
};
#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctions.h
index 5ba5293a0..61b78f4f2 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctions.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,9 +11,11 @@
#ifndef EIGEN_MATHFUNCTIONS_H
#define EIGEN_MATHFUNCTIONS_H
-// source: http://www.geom.uiuc.edu/~huberty/math5337/groupe/digits.html
// TODO this should better be moved to NumTraits
-#define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L
+// Source: WolframAlpha
+#define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L
+#define EIGEN_LOG2E 1.442695040888963407359924681001892137426645954152985934135449406931109219L
+#define EIGEN_LN2 0.693147180559945309417232121458176568075500134360255254120680009493393621L
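
The two new constants are reciprocals of each other, log2(e) = 1/ln(2), which is exactly what lets plog2 earlier in this patch be a single multiply on top of the natural log. A quick check with the literals truncated to double precision:

#include <cstdio>

int main() {
  const double log2e = 1.442695040888963407;  // EIGEN_LOG2E, truncated
  const double ln2   = 0.693147180559945309;  // EIGEN_LN2, truncated
  std::printf("log2e*ln2 = %.17g (expect 1)\n", log2e * ln2);
  std::printf("1/ln2     = %.17g\n", 1.0 / ln2);
  return 0;
}
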
namespace Eigen {
@@ -96,7 +99,7 @@ struct real_default_impl<Scalar,true>
template<typename Scalar> struct real_impl : real_default_impl<Scalar> {};
-#ifdef EIGEN_CUDA_ARCH
+#if defined(EIGEN_GPU_COMPILE_PHASE)
template<typename T>
struct real_impl<std::complex<T> >
{
@@ -144,7 +147,7 @@ struct imag_default_impl<Scalar,true>
template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};
-#ifdef EIGEN_CUDA_ARCH
+#if defined(EIGEN_GPU_COMPILE_PHASE)
template<typename T>
struct imag_impl<std::complex<T> >
{
@@ -212,12 +215,12 @@ struct imag_ref_default_impl
template<typename Scalar>
struct imag_ref_default_impl<Scalar, false>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline Scalar run(Scalar&)
{
return Scalar(0);
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline const Scalar run(const Scalar&)
{
return Scalar(0);
@@ -238,7 +241,7 @@ struct imag_ref_retval
****************************************************************************/
template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
-struct conj_impl
+struct conj_default_impl
{
EIGEN_DEVICE_FUNC
static inline Scalar run(const Scalar& x)
@@ -248,7 +251,7 @@ struct conj_impl
};
template<typename Scalar>
-struct conj_impl<Scalar,true>
+struct conj_default_impl<Scalar,true>
{
EIGEN_DEVICE_FUNC
static inline Scalar run(const Scalar& x)
@@ -258,6 +261,9 @@ struct conj_impl<Scalar,true>
}
};
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct conj_impl : conj_default_impl<Scalar, IsComplex> {};
+
template<typename Scalar>
struct conj_retval
{
@@ -286,7 +292,7 @@ struct abs2_impl_default<Scalar, true> // IsComplex
EIGEN_DEVICE_FUNC
static inline RealScalar run(const Scalar& x)
{
- return real(x)*real(x) + imag(x)*imag(x);
+ return x.real()*x.real() + x.imag()*x.imag();
}
};
@@ -308,18 +314,80 @@ struct abs2_retval
};
/****************************************************************************
+* Implementation of sqrt/rsqrt *
+****************************************************************************/
+
+template<typename Scalar>
+struct sqrt_impl
+{
+ EIGEN_DEVICE_FUNC
+ static EIGEN_ALWAYS_INLINE Scalar run(const Scalar& x)
+ {
+ EIGEN_USING_STD(sqrt);
+ return sqrt(x);
+ }
+};
+
+// Complex sqrt defined in MathFunctionsImpl.h.
+template<typename T> EIGEN_DEVICE_FUNC std::complex<T> complex_sqrt(const std::complex<T>& a_x);
+
+// Custom implementation is faster than `std::sqrt`, works on
+// GPU, and correctly handles special cases (unlike MSVC).
+template<typename T>
+struct sqrt_impl<std::complex<T> >
+{
+ EIGEN_DEVICE_FUNC
+ static EIGEN_ALWAYS_INLINE std::complex<T> run(const std::complex<T>& x)
+ {
+ return complex_sqrt<T>(x);
+ }
+};
+
+template<typename Scalar>
+struct sqrt_retval
+{
+ typedef Scalar type;
+};
+
+// Default implementation relies on numext::sqrt, at bottom of file.
+template<typename T>
+struct rsqrt_impl;
+
+// Complex rsqrt defined in MathFunctionsImpl.h.
+template<typename T> EIGEN_DEVICE_FUNC std::complex<T> complex_rsqrt(const std::complex<T>& a_x);
+
+template<typename T>
+struct rsqrt_impl<std::complex<T> >
+{
+ EIGEN_DEVICE_FUNC
+ static EIGEN_ALWAYS_INLINE std::complex<T> run(const std::complex<T>& x)
+ {
+ return complex_rsqrt<T>(x);
+ }
+};
+
+template<typename Scalar>
+struct rsqrt_retval
+{
+ typedef Scalar type;
+};
+
+/****************************************************************************
* Implementation of norm1 *
****************************************************************************/
template<typename Scalar, bool IsComplex>
-struct norm1_default_impl
+struct norm1_default_impl;
+
+template<typename Scalar>
+struct norm1_default_impl<Scalar,true>
{
typedef typename NumTraits<Scalar>::Real RealScalar;
EIGEN_DEVICE_FUNC
static inline RealScalar run(const Scalar& x)
{
- EIGEN_USING_STD_MATH(abs);
- return abs(real(x)) + abs(imag(x));
+ EIGEN_USING_STD(abs);
+ return abs(x.real()) + abs(x.imag());
}
};
@@ -329,7 +397,7 @@ struct norm1_default_impl<Scalar, false>
EIGEN_DEVICE_FUNC
static inline Scalar run(const Scalar& x)
{
- EIGEN_USING_STD_MATH(abs);
+ EIGEN_USING_STD(abs);
return abs(x);
}
};
@@ -347,31 +415,7 @@ struct norm1_retval
* Implementation of hypot *
****************************************************************************/
-template<typename Scalar>
-struct hypot_impl
-{
- typedef typename NumTraits<Scalar>::Real RealScalar;
- static inline RealScalar run(const Scalar& x, const Scalar& y)
- {
- EIGEN_USING_STD_MATH(abs);
- EIGEN_USING_STD_MATH(sqrt);
- RealScalar _x = abs(x);
- RealScalar _y = abs(y);
- Scalar p, qp;
- if(_x>_y)
- {
- p = _x;
- qp = _y / p;
- }
- else
- {
- p = _y;
- qp = _x / p;
- }
- if(p==RealScalar(0)) return RealScalar(0);
- return p * sqrt(RealScalar(1) + qp*qp);
- }
-};
+template<typename Scalar> struct hypot_impl;
template<typename Scalar>
struct hypot_retval
@@ -383,7 +427,7 @@ struct hypot_retval
* Implementation of cast *
****************************************************************************/
-template<typename OldType, typename NewType>
+template<typename OldType, typename NewType, typename EnableIf = void>
struct cast_impl
{
EIGEN_DEVICE_FUNC
@@ -393,6 +437,22 @@ struct cast_impl
}
};
+// Casting from S -> Complex<T> leads to an implicit conversion from S to T,
+// generating warnings on clang. Here we explicitly cast the real component.
+template<typename OldType, typename NewType>
+struct cast_impl<OldType, NewType,
+ typename internal::enable_if<
+ !NumTraits<OldType>::IsComplex && NumTraits<NewType>::IsComplex
+ >::type>
+{
+ EIGEN_DEVICE_FUNC
+ static inline NewType run(const OldType& x)
+ {
+ typedef typename NumTraits<NewType>::Real NewReal;
+ return static_cast<NewType>(static_cast<NewReal>(x));
+ }
+};
+
// here, for once, we're plainly returning NewType: we don't want cast to do weird things.
template<typename OldType, typename NewType>
@@ -406,29 +466,59 @@ inline NewType cast(const OldType& x)
* Implementation of round *
****************************************************************************/
+template<typename Scalar>
+struct round_impl
+{
+ EIGEN_DEVICE_FUNC
+ static inline Scalar run(const Scalar& x)
+ {
+ EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)
#if EIGEN_HAS_CXX11_MATH
- template<typename Scalar>
- struct round_impl {
- static inline Scalar run(const Scalar& x)
- {
- EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)
- EIGEN_USING_STD_MATH(round);
- return round(x);
- }
- };
+ EIGEN_USING_STD(round);
+#endif
+ return Scalar(round(x));
+ }
+};
+
+#if !EIGEN_HAS_CXX11_MATH
+#if EIGEN_HAS_C99_MATH
+// Use ::roundf for float.
+template<>
+struct round_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static inline float run(const float& x)
+ {
+ return ::roundf(x);
+ }
+};
#else
- template<typename Scalar>
- struct round_impl
+template<typename Scalar>
+struct round_using_floor_ceil_impl
+{
+ EIGEN_DEVICE_FUNC
+ static inline Scalar run(const Scalar& x)
{
- static inline Scalar run(const Scalar& x)
- {
- EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)
- EIGEN_USING_STD_MATH(floor);
- EIGEN_USING_STD_MATH(ceil);
- return (x > Scalar(0)) ? floor(x + Scalar(0.5)) : ceil(x - Scalar(0.5));
+ EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)
+ // Without C99 round/roundf, resort to floor/ceil.
+ EIGEN_USING_STD(floor);
+ EIGEN_USING_STD(ceil);
+ // If not enough precision to resolve a decimal at all, return the input.
+ // Otherwise, adding 0.5 can trigger an increment by 1.
+ const Scalar limit = Scalar(1ull << (NumTraits<Scalar>::digits() - 1));
+ if (x >= limit || x <= -limit) {
+ return x;
}
- };
-#endif
+ return (x > Scalar(0)) ? Scalar(floor(x + Scalar(0.5))) : Scalar(ceil(x - Scalar(0.5)));
+ }
+};
+
+template<>
+struct round_impl<float> : round_using_floor_ceil_impl<float> {};
+
+template<>
+struct round_impl<double> : round_using_floor_ceil_impl<double> {};
+#endif // EIGEN_HAS_C99_MATH
+#endif // !EIGEN_HAS_CXX11_MATH
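
The limit guard in the floor/ceil fallback matters because once |x| reaches 2^(digits()-1) the type has no fractional resolution left, and x + 0.5 can round up to the next representable integer, shifting the result by one. A standalone illustration with float (24 significand bits, so the limit is 2^23), assuming typical IEEE-754 evaluation:

#include <cmath>
#include <cstdio>

int main() {
  const float limit = 8388608.0f;  // 2^23 == 1ull << (digits() - 1) for float
  float x = 8388609.0f;            // exactly representable, no fraction left
  // Naive floor(x + 0.5f): 8388609.5 is not representable; it rounds to
  // 8388610 under ties-to-even, so the "rounded" value moves by one.
  std::printf("naive   = %.1f\n", std::floor(x + 0.5f));
  // The guard in round_using_floor_ceil_impl returns x unchanged instead.
  std::printf("guarded = %.1f\n",
              (x >= limit || x <= -limit) ? x : std::floor(x + 0.5f));
  return 0;
}
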
template<typename Scalar>
struct round_retval
@@ -437,43 +527,112 @@ struct round_retval
};
/****************************************************************************
-* Implementation of arg *
+* Implementation of rint *
****************************************************************************/
+template<typename Scalar>
+struct rint_impl {
+ EIGEN_DEVICE_FUNC
+ static inline Scalar run(const Scalar& x)
+ {
+ EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)
#if EIGEN_HAS_CXX11_MATH
- template<typename Scalar>
- struct arg_impl {
- static inline Scalar run(const Scalar& x)
- {
- EIGEN_USING_STD_MATH(arg);
- return arg(x);
- }
- };
-#else
- template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
- struct arg_default_impl
+ EIGEN_USING_STD(rint);
+#endif
+ return rint(x);
+ }
+};
+
+#if !EIGEN_HAS_CXX11_MATH
+template<>
+struct rint_impl<double> {
+ EIGEN_DEVICE_FUNC
+ static inline double run(const double& x)
{
- typedef typename NumTraits<Scalar>::Real RealScalar;
- EIGEN_DEVICE_FUNC
- static inline RealScalar run(const Scalar& x)
- {
- return (x < Scalar(0)) ? Scalar(EIGEN_PI) : Scalar(0); }
- };
+ return ::rint(x);
+ }
+};
+template<>
+struct rint_impl<float> {
+ EIGEN_DEVICE_FUNC
+ static inline float run(const float& x)
+ {
+ return ::rintf(x);
+ }
+};
+#endif
- template<typename Scalar>
- struct arg_default_impl<Scalar,true>
+template<typename Scalar>
+struct rint_retval
+{
+ typedef Scalar type;
+};
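
Unlike round, which takes halfway cases away from zero, rint honors the current floating-point rounding mode, round-to-nearest-even by default, which is why the patch introduces it as a separate entry point. A small comparison under the default mode:

#include <cmath>
#include <cstdio>

int main() {
  // Under the default round-to-nearest-even mode, halfway cases differ:
  for (double x : {0.5, 1.5, 2.5, -2.5}) {
    std::printf("x=%4.1f  rint=%4.1f  round=%4.1f\n",
                x, std::rint(x), std::round(x));
  }
  // rint:  0, 2, 2, -2     round: 1, 2, 3, -3
  return 0;
}
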
+
+/****************************************************************************
+* Implementation of arg *
+****************************************************************************/
+
+// Visual Studio 2017 has a bug where arg(float) returns 0 for negative inputs.
+// This seems to be fixed in VS 2019.
+#if EIGEN_HAS_CXX11_MATH && (!EIGEN_COMP_MSVC || EIGEN_COMP_MSVC >= 1920)
+// std::arg is only defined for std::complex types, integer types, and float/double/long double
+template<typename Scalar,
+ bool HasStdImpl = NumTraits<Scalar>::IsComplex || is_integral<Scalar>::value
+ || is_same<Scalar, float>::value || is_same<Scalar, double>::value
+ || is_same<Scalar, long double>::value >
+struct arg_default_impl;
+
+template<typename Scalar>
+struct arg_default_impl<Scalar, true> {
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ EIGEN_DEVICE_FUNC
+ static inline RealScalar run(const Scalar& x)
{
- typedef typename NumTraits<Scalar>::Real RealScalar;
- EIGEN_DEVICE_FUNC
- static inline RealScalar run(const Scalar& x)
- {
- EIGEN_USING_STD_MATH(arg);
- return arg(x);
- }
- };
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ // HIP does not seem to have a native device side implementation for the math routine "arg"
+ using std::arg;
+ #else
+ EIGEN_USING_STD(arg);
+ #endif
+ return static_cast<RealScalar>(arg(x));
+ }
+};
- template<typename Scalar> struct arg_impl : arg_default_impl<Scalar> {};
+// Must be non-complex floating-point type (e.g. half/bfloat16).
+template<typename Scalar>
+struct arg_default_impl<Scalar, false> {
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ EIGEN_DEVICE_FUNC
+ static inline RealScalar run(const Scalar& x)
+ {
+ return (x < Scalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0);
+ }
+};
+#else
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct arg_default_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ EIGEN_DEVICE_FUNC
+ static inline RealScalar run(const Scalar& x)
+ {
+ return (x < RealScalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0);
+ }
+};
+
+template<typename Scalar>
+struct arg_default_impl<Scalar,true>
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ EIGEN_DEVICE_FUNC
+ static inline RealScalar run(const Scalar& x)
+ {
+ EIGEN_USING_STD(arg);
+ return arg(x);
+ }
+};
#endif
+template<typename Scalar> struct arg_impl : arg_default_impl<Scalar> {};
template<typename Scalar>
struct arg_retval
@@ -495,18 +654,19 @@ namespace std_fallback {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
typedef typename NumTraits<Scalar>::Real RealScalar;
- EIGEN_USING_STD_MATH(exp);
+ EIGEN_USING_STD(exp);
Scalar u = exp(x);
- if (u == Scalar(1)) {
+ if (numext::equal_strict(u, Scalar(1))) {
return x;
}
Scalar um1 = u - RealScalar(1);
- if (um1 == Scalar(-1)) {
+ if (numext::equal_strict(um1, Scalar(-1))) {
return RealScalar(-1);
}
- EIGEN_USING_STD_MATH(log);
- return (u - RealScalar(1)) * x / log(u);
+ EIGEN_USING_STD(log);
+ Scalar logu = log(u);
+ return numext::equal_strict(u, logu) ? u : (u - RealScalar(1)) * x / logu;
}
}
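
The std_fallback::expm1 above rests on the identity expm1(x) = (u - 1) * x / log(u) with u = exp(x), which stays accurate where the naive exp(x) - 1 cancels catastrophically; the patch also adds a guard for infinite u that the sketch below omits. A standalone comparison for a tiny argument, with std::expm1 as the reference:

#include <cmath>
#include <cstdio>

// Same shape as the std_fallback: accurate expm1 built from exp and log.
double expm1_fallback(double x) {
  double u = std::exp(x);
  if (u == 1.0) return x;        // x so small that exp(x) rounds to 1
  double um1 = u - 1.0;
  if (um1 == -1.0) return -1.0;  // exp(x) underflowed to 0
  return um1 * x / std::log(u);
}

int main() {
  double x = 1e-12;
  std::printf("naive    = %.17g\n", std::exp(x) - 1.0);  // cancellation
  std::printf("fallback = %.17g\n", expm1_fallback(x));
  std::printf("std      = %.17g\n", std::expm1(x));
  return 0;
}
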
@@ -517,13 +677,13 @@ struct expm1_impl {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
#if EIGEN_HAS_CXX11_MATH
using std::expm1;
- #endif
+ #else
using std_fallback::expm1;
+ #endif
return expm1(x);
}
};
-
template<typename Scalar>
struct expm1_retval
{
@@ -531,6 +691,30 @@ struct expm1_retval
};
/****************************************************************************
+* Implementation of log *
+****************************************************************************/
+
+// Complex log defined in MathFunctionsImpl.h.
+template<typename T> EIGEN_DEVICE_FUNC std::complex<T> complex_log(const std::complex<T>& z);
+
+template<typename Scalar>
+struct log_impl {
+ EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x)
+ {
+ EIGEN_USING_STD(log);
+ return static_cast<Scalar>(log(x));
+ }
+};
+
+template<typename Scalar>
+struct log_impl<std::complex<Scalar> > {
+ EIGEN_DEVICE_FUNC static inline std::complex<Scalar> run(const std::complex<Scalar>& z)
+ {
+ return complex_log(z);
+ }
+};
+
+/****************************************************************************
* Implementation of log1p *
****************************************************************************/
@@ -541,9 +725,12 @@ namespace std_fallback {
EIGEN_DEVICE_FUNC inline Scalar log1p(const Scalar& x) {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
typedef typename NumTraits<Scalar>::Real RealScalar;
- EIGEN_USING_STD_MATH(log);
+ EIGEN_USING_STD(log);
Scalar x1p = RealScalar(1) + x;
- return ( x1p == Scalar(1) ) ? x : x * ( log(x1p) / (x1p - RealScalar(1)) );
+ Scalar log_1p = log_impl<Scalar>::run(x1p);
+ const bool is_small = numext::equal_strict(x1p, Scalar(1));
+ const bool is_inf = numext::equal_strict(x1p, log_1p);
+ return (is_small || is_inf) ? x : x * (log_1p / (x1p - RealScalar(1)));
}
}
@@ -554,12 +741,22 @@ struct log1p_impl {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
#if EIGEN_HAS_CXX11_MATH
using std::log1p;
- #endif
+ #else
using std_fallback::log1p;
+ #endif
return log1p(x);
}
};
+// Specialization for complex types that are not supported by std::log1p.
+template <typename RealScalar>
+struct log1p_impl<std::complex<RealScalar> > {
+ EIGEN_DEVICE_FUNC static inline std::complex<RealScalar> run(
+ const std::complex<RealScalar>& x) {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar)
+ return std_fallback::log1p(x);
+ }
+};
template<typename Scalar>
struct log1p_retval
@@ -578,7 +775,7 @@ struct pow_impl
typedef typename ScalarBinaryOpTraits<ScalarX,ScalarY,internal::scalar_pow_op<ScalarX,ScalarY> >::ReturnType result_type;
static EIGEN_DEVICE_FUNC inline result_type run(const ScalarX& x, const ScalarY& y)
{
- EIGEN_USING_STD_MATH(pow);
+ EIGEN_USING_STD(pow);
return pow(x, y);
}
};
@@ -689,20 +886,27 @@ struct random_default_impl<Scalar, false, true>
{
static inline Scalar run(const Scalar& x, const Scalar& y)
{
- typedef typename conditional<NumTraits<Scalar>::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX;
- if(y<x)
+ if (y <= x)
return x;
- // the following difference might overflow on a 32 bits system,
- // but since y>=x the result converted to an unsigned long is still correct.
- std::size_t range = ScalarX(y)-ScalarX(x);
- std::size_t offset = 0;
- // rejection sampling
- std::size_t divisor = 1;
- std::size_t multiplier = 1;
- if(range<RAND_MAX) divisor = (std::size_t(RAND_MAX)+1)/(range+1);
- else multiplier = 1 + range/(std::size_t(RAND_MAX)+1);
+ // ScalarU is the unsigned counterpart of Scalar, possibly Scalar itself.
+ typedef typename make_unsigned<Scalar>::type ScalarU;
+ // ScalarX is the widest of ScalarU and unsigned int.
+ // We'll deal only with ScalarX and unsigned int below thus avoiding signed
+ // types and arithmetic and signed overflows (which are undefined behavior).
+ typedef typename conditional<(ScalarU(-1) > unsigned(-1)), ScalarU, unsigned>::type ScalarX;
+ // The following difference doesn't overflow, provided our integer types are two's
+ // complement and have the same number of padding bits in signed and unsigned variants.
+ // This is the case in most modern implementations of C++.
+ ScalarX range = ScalarX(y) - ScalarX(x);
+ ScalarX offset = 0;
+ ScalarX divisor = 1;
+ ScalarX multiplier = 1;
+ const unsigned rand_max = RAND_MAX;
+ if (range <= rand_max) divisor = (rand_max + 1) / (range + 1);
+ else multiplier = 1 + range / (rand_max + 1);
+ // Rejection sampling.
do {
- offset = (std::size_t(std::rand()) * multiplier) / divisor;
+ offset = (unsigned(std::rand()) * multiplier) / divisor;
} while (offset > range);
return Scalar(ScalarX(x) + offset);
}
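
The rewritten integer path widens everything to an unsigned type at least as wide as unsigned int, then uses a divisor/multiplier pair plus a rejection loop so std::rand() covers [x, y] without modulo bias. A self-contained sketch of the same scheme (hypothetical helper, not the Eigen entry point; assumes RAND_MAX < UINT_MAX and two's-complement wraparound, as the patch's own comments do):

#include <cstdio>
#include <cstdlib>

// Uniform integer in [lo, hi] built on std::rand, rejection-sampled to
// avoid the bias of a plain (rand() % range).
int uniform_in_range(int lo, int hi) {
  if (hi <= lo) return lo;
  unsigned range = static_cast<unsigned>(hi) - static_cast<unsigned>(lo);
  unsigned divisor = 1, multiplier = 1;
  const unsigned rand_max = RAND_MAX;
  if (range <= rand_max) divisor = (rand_max + 1u) / (range + 1u);
  else                   multiplier = 1u + range / (rand_max + 1u);
  unsigned offset;
  do {
    offset = (static_cast<unsigned>(std::rand()) * multiplier) / divisor;
  } while (offset > range);  // reject the sliver that would skew the tail
  return static_cast<int>(static_cast<unsigned>(lo) + offset);
}

int main() {
  std::srand(42);
  for (int i = 0; i < 8; ++i) std::printf("%d ", uniform_in_range(-3, 3));
  std::printf("\n");
  return 0;
}
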
@@ -727,8 +931,8 @@ struct random_default_impl<Scalar, true, false>
{
static inline Scalar run(const Scalar& x, const Scalar& y)
{
- return Scalar(random(real(x), real(y)),
- random(imag(x), imag(y)));
+ return Scalar(random(x.real(), y.real()),
+ random(x.imag(), y.imag()));
}
static inline Scalar run()
{
@@ -749,7 +953,7 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
}
-// Implementatin of is* functions
+// Implementation of is* functions
// std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang.
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)
@@ -778,7 +982,7 @@ EIGEN_DEVICE_FUNC
typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type
isfinite_impl(const T& x)
{
- #ifdef EIGEN_CUDA_ARCH
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
return (::isfinite)(x);
#elif EIGEN_USE_STD_FPCLASSIFY
using std::isfinite;
@@ -793,7 +997,7 @@ EIGEN_DEVICE_FUNC
typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type
isinf_impl(const T& x)
{
- #ifdef EIGEN_CUDA_ARCH
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
return (::isinf)(x);
#elif EIGEN_USE_STD_FPCLASSIFY
using std::isinf;
@@ -808,7 +1012,7 @@ EIGEN_DEVICE_FUNC
typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type
isnan_impl(const T& x)
{
- #ifdef EIGEN_CUDA_ARCH
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
return (::isnan)(x);
#elif EIGEN_USE_STD_FPCLASSIFY
using std::isnan;
@@ -865,7 +1069,6 @@ template<typename T> EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex<T>& x)
template<typename T> EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex<T>& x);
template<typename T> T generic_fast_tanh_float(const T& a_x);
-
} // end namespace internal
/****************************************************************************
@@ -874,12 +1077,12 @@ template<typename T> T generic_fast_tanh_float(const T& a_x);
namespace numext {
-#if !defined(EIGEN_CUDA_ARCH) && !defined(__SYCL_DEVICE_ONLY__)
+#if (!defined(EIGEN_GPUCC) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC))
template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
{
- EIGEN_USING_STD_MATH(min);
+ EIGEN_USING_STD(min)
return min EIGEN_NOT_A_MACRO (x,y);
}
@@ -887,114 +1090,140 @@ template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
{
- EIGEN_USING_STD_MATH(max);
+ EIGEN_USING_STD(max)
return max EIGEN_NOT_A_MACRO (x,y);
}
-
-
-#elif defined(__SYCL_DEVICE_ONLY__)
+#else
template<typename T>
+EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
{
-
return y < x ? y : x;
}
-
-template<typename T>
-EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
-{
-
- return x < y ? y : x;
-}
-
-EIGEN_ALWAYS_INLINE int mini(const int& x, const int& y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE int maxi(const int& x, const int& y)
-{
- return cl::sycl::max(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned int mini(const unsigned int& x, const unsigned int& y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned int maxi(const unsigned int& x, const unsigned int& y)
-{
- return cl::sycl::max(x,y);
-}
-
-EIGEN_ALWAYS_INLINE long mini(const long & x, const long & y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE long maxi(const long & x, const long & y)
-{
- return cl::sycl::max(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned long mini(const unsigned long& x, const unsigned long& y)
-{
- return cl::sycl::min(x,y);
-}
-
-EIGEN_ALWAYS_INLINE unsigned long maxi(const unsigned long& x, const unsigned long& y)
-{
- return cl::sycl::max(x,y);
-}
-
-
+template<>
+EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)
{
- return cl::sycl::fmin(x,y);
-}
-
-EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
-{
- return cl::sycl::fmax(x,y);
+ return fminf(x, y);
}
-
+template<>
+EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE double mini(const double& x, const double& y)
{
- return cl::sycl::fmin(x,y);
+ return fmin(x, y);
}
-
-EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y)
+template<>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y)
{
- return cl::sycl::fmax(x,y);
+#if defined(EIGEN_HIPCC)
+ // no "fminl" on HIP yet
+ return (x < y) ? x : y;
+#else
+ return fminl(x, y);
+#endif
}
-#else
template<typename T>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
+EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
{
- return y < x ? y : x;
+ return x < y ? y : x;
}
template<>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)
+EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
{
- return fminf(x, y);
+ return fmaxf(x, y);
}
-template<typename T>
+template<>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
+EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y)
{
- return x < y ? y : x;
+ return fmax(x, y);
}
template<>
EIGEN_DEVICE_FUNC
-EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
+EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y)
{
- return fmaxf(x, y);
+#if defined(EIGEN_HIPCC)
+ // no "fmaxl" on HIP yet
+ return (x > y) ? x : y;
+#else
+ return fmaxl(x, y);
+#endif
}
#endif
+#if defined(SYCL_DEVICE_ONLY)
+
+
+#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_long)
+#define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_long)
+#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong)
+#define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong)
+#define SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC)
+#define SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC)
+#define SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \
+ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC,cl::sycl::cl_double)
+#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(NAME, FUNC) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \
+ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC,cl::sycl::cl_double)
+#define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(NAME, FUNC, RET_TYPE) \
+ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_float) \
+ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_double)
+
+#define SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \
+template<> \
+ EIGEN_DEVICE_FUNC \
+ EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE& x) { \
+ return cl::sycl::FUNC(x); \
+ }
+
+#define SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, TYPE) \
+ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, TYPE, TYPE)
+
+#define SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \
+ template<> \
+ EIGEN_DEVICE_FUNC \
+ EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE1& x, const ARG_TYPE2& y) { \
+ return cl::sycl::FUNC(x, y); \
+ }
+
+#define SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \
+ SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE, ARG_TYPE)
+
+#define SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, TYPE) \
+ SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, TYPE, TYPE)
+
+SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(mini, min)
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(mini, fmin)
+SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(maxi, max)
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(maxi, fmax)
+
+#endif
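
These helpers stamp out one full template specialization per (name, function, type) triple, each forwarding to a cl::sycl built-in. Since the real expansion only compiles in a SYCL device build, here is an analogous, compilable illustration of the stamping pattern using plain std:: functions instead:

#include <cmath>
#include <cstdio>

// Generic fallback, as in numext::mini above.
template <typename T> T mini(const T& x, const T& y) { return y < x ? y : x; }

// One macro emits a full specialization per (name, func, type), like the
// SYCL_SPECIALIZE_* helpers above.
#define SPECIALIZE_BINARY(NAME, FUNC, TYPE) \
  template <>                               \
  TYPE NAME(const TYPE& x, const TYPE& y) { return FUNC(x, y); }

SPECIALIZE_BINARY(mini, std::fmin, float)
SPECIALIZE_BINARY(mini, std::fmin, double)

int main() {
  std::printf("%g %g\n", mini(2.0f, -1.0f), mini(2.0, 3.0));
  return 0;
}
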
+
template<typename Scalar>
EIGEN_DEVICE_FUNC
@@ -1062,6 +1291,34 @@ inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
EIGEN_DEVICE_FUNC
inline bool abs2(bool x) { return x; }
+template<typename T>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE T absdiff(const T& x, const T& y)
+{
+ return x > y ? x - y : y - x;
+}
+template<>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE float absdiff(const float& x, const float& y)
+{
+ return fabsf(x - y);
+}
+template<>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE double absdiff(const double& x, const double& y)
+{
+ return fabs(x - y);
+}
+
+#if !defined(EIGEN_GPUCC)
+// HIP and CUDA do not support long double.
+template<>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE long double absdiff(const long double& x, const long double& y) {
+ return fabsl(x - y);
+}
+#endif
+
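A quick editorial sketch of why the branch-based generic absdiff above matters: for unsigned integer types a naive x - y wraps around, while the conditional form is always correct; the float/double/long double specializations simply defer to the dedicated libm functions.

#include <cstdio>
template<typename T>
static T absdiff(const T& x, const T& y) { return x > y ? x - y : y - x; } // mirrors the generic overload
int main() {
  unsigned a = 3u, b = 7u;
  std::printf("%u\n", absdiff(a, b)); // 4; plain (a - b) would wrap to 4294967292
}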
template<typename Scalar>
EIGEN_DEVICE_FUNC
inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
@@ -1076,6 +1333,10 @@ inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar&
return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
}
+#if defined(SYCL_DEVICE_ONLY)
+ SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(hypot, hypot)
+#endif
+
template<typename Scalar>
EIGEN_DEVICE_FUNC
inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x)
@@ -1083,12 +1344,11 @@ inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x)
return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float log1p(float x) { return cl::sycl::log1p(x); }
-EIGEN_ALWAYS_INLINE double log1p(double x) { return cl::sycl::log1p(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log1p, log1p)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float log1p(const float &x) { return ::log1pf(x); }
@@ -1103,23 +1363,26 @@ inline typename internal::pow_impl<ScalarX,ScalarY>::result_type pow(const Scala
return internal::pow_impl<ScalarX,ScalarY>::run(x, y);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float pow(float x, float y) { return cl::sycl::pow(x, y); }
-EIGEN_ALWAYS_INLINE double pow(double x, double y) { return cl::sycl::pow(x, y); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(pow, pow)
+#endif
template<typename T> EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); }
template<typename T> EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return internal::isinf_impl(x); }
template<typename T> EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); }
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float isnan(float x) { return cl::sycl::isnan(x); }
-EIGEN_ALWAYS_INLINE double isnan(double x) { return cl::sycl::isnan(x); }
-EIGEN_ALWAYS_INLINE float isinf(float x) { return cl::sycl::isinf(x); }
-EIGEN_ALWAYS_INLINE double isinf(double x) { return cl::sycl::isinf(x); }
-EIGEN_ALWAYS_INLINE float isfinite(float x) { return cl::sycl::isfinite(x); }
-EIGEN_ALWAYS_INLINE double isfinite(double x) { return cl::sycl::isfinite(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isnan, isnan, bool)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isinf, isinf, bool)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isfinite, isfinite, bool)
+#endif
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(rint, Scalar) rint(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(rint, Scalar)::run(x);
+}
template<typename Scalar>
EIGEN_DEVICE_FUNC
@@ -1128,25 +1391,23 @@ inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x)
return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float round(float x) { return cl::sycl::round(x); }
-EIGEN_ALWAYS_INLINE double round(double x) { return cl::sycl::round(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(round, round)
+#endif
template<typename T>
EIGEN_DEVICE_FUNC
T (floor)(const T& x)
{
- EIGEN_USING_STD_MATH(floor);
+ EIGEN_USING_STD(floor)
return floor(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float floor(float x) { return cl::sycl::floor(x); }
-EIGEN_ALWAYS_INLINE double floor(double x) { return cl::sycl::floor(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(floor, floor)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float floor(const float &x) { return ::floorf(x); }
@@ -1158,16 +1419,15 @@ template<typename T>
EIGEN_DEVICE_FUNC
T (ceil)(const T& x)
{
- EIGEN_USING_STD_MATH(ceil);
+ EIGEN_USING_STD(ceil);
return ceil(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float ceil(float x) { return cl::sycl::ceil(x); }
-EIGEN_ALWAYS_INLINE double ceil(double x) { return cl::sycl::ceil(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(ceil, ceil)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float ceil(const float &x) { return ::ceilf(x); }
@@ -1193,39 +1453,49 @@ inline int log2(int x)
/** \returns the square root of \a x.
*
- * It is essentially equivalent to \code using std::sqrt; return sqrt(x); \endcode,
+ * It is essentially equivalent to
+ * \code using std::sqrt; return sqrt(x); \endcode
* but slightly faster for float/double and some compilers (e.g., gcc), thanks to
* specializations when SSE is enabled.
*
 * Its use is justified in performance-critical functions, like norm/normalize.
*/
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+EIGEN_ALWAYS_INLINE EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x);
+}
+
+// Boolean specialization, avoids implicit float to bool conversion (-Wimplicit-conversion-floating-point-to-bool).
+template<>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC
+bool sqrt<bool>(const bool &x) { return x; }
+
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sqrt, sqrt)
+#endif
+
+/** \returns the reciprocal square root of \a x. **/
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-T sqrt(const T &x)
+T rsqrt(const T& x)
{
- EIGEN_USING_STD_MATH(sqrt);
- return sqrt(x);
+ return internal::rsqrt_impl<T>::run(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float sqrt(float x) { return cl::sycl::sqrt(x); }
-EIGEN_ALWAYS_INLINE double sqrt(double x) { return cl::sycl::sqrt(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
-
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T log(const T &x) {
- EIGEN_USING_STD_MATH(log);
- return log(x);
+ return internal::log_impl<T>::run(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float log(float x) { return cl::sycl::log(x); }
-EIGEN_ALWAYS_INLINE double log(double x) { return cl::sycl::log(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log, log)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float log(const float &x) { return ::logf(x); }
@@ -1237,7 +1507,7 @@ template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
typename internal::enable_if<NumTraits<T>::IsSigned || NumTraits<T>::IsComplex,typename NumTraits<T>::Real>::type
abs(const T &x) {
- EIGEN_USING_STD_MATH(abs);
+ EIGEN_USING_STD(abs);
return abs(x);
}
@@ -1248,12 +1518,12 @@ abs(const T &x) {
return x;
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float abs(float x) { return cl::sycl::fabs(x); }
-EIGEN_ALWAYS_INLINE double abs(double x) { return cl::sycl::fabs(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(abs, abs)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(abs, fabs)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float abs(const float &x) { return ::fabsf(x); }
@@ -1274,21 +1544,36 @@ double abs(const std::complex<double>& x) {
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T exp(const T &x) {
- EIGEN_USING_STD_MATH(exp);
+ EIGEN_USING_STD(exp);
return exp(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float exp(float x) { return cl::sycl::exp(x); }
-EIGEN_ALWAYS_INLINE double exp(double x) { return cl::sycl::exp(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(exp, exp)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float exp(const float &x) { return ::expf(x); }
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
double exp(const double &x) { return ::exp(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+std::complex<float> exp(const std::complex<float>& x) {
+ float com = ::expf(x.real());
+ float res_real = com * ::cosf(x.imag());
+ float res_imag = com * ::sinf(x.imag());
+ return std::complex<float>(res_real, res_imag);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+std::complex<double> exp(const std::complex<double>& x) {
+ double com = ::exp(x.real());
+ double res_real = com * ::cos(x.imag());
+ double res_imag = com * ::sin(x.imag());
+ return std::complex<double>(res_real, res_imag);
+}
#endif
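The two complex overloads above are a direct application of Euler's formula, exp(x + i*y) = e^x * (cos y + i*sin y), spelled out with the single- and double-precision CUDA/HIP intrinsics. A host-side sketch of the same identity using only the standard library:

#include <complex>
#include <cstdio>
int main() {
  const double pi = 3.14159265358979323846;
  std::complex<double> w = std::exp(std::complex<double>(1.0, pi)); // e^(1 + i*pi) = -e
  std::printf("%f %f\n", w.real(), w.imag()); // approx -2.718282 0.000000
}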
template<typename Scalar>
@@ -1298,12 +1583,11 @@ inline EIGEN_MATHFUNC_RETVAL(expm1, Scalar) expm1(const Scalar& x)
return EIGEN_MATHFUNC_IMPL(expm1, Scalar)::run(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float expm1(float x) { return cl::sycl::expm1(x); }
-EIGEN_ALWAYS_INLINE double expm1(double x) { return cl::sycl::expm1(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(expm1, expm1)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float expm1(const float &x) { return ::expm1f(x); }
@@ -1314,16 +1598,15 @@ double expm1(const double &x) { return ::expm1(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T cos(const T &x) {
- EIGEN_USING_STD_MATH(cos);
+ EIGEN_USING_STD(cos);
return cos(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float cos(float x) { return cl::sycl::cos(x); }
-EIGEN_ALWAYS_INLINE double cos(double x) { return cl::sycl::cos(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cos, cos)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float cos(const float &x) { return ::cosf(x); }
@@ -1334,16 +1617,15 @@ double cos(const double &x) { return ::cos(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T sin(const T &x) {
- EIGEN_USING_STD_MATH(sin);
+ EIGEN_USING_STD(sin);
return sin(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float sin(float x) { return cl::sycl::sin(x); }
-EIGEN_ALWAYS_INLINE double sin(double x) { return cl::sycl::sin(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sin, sin)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float sin(const float &x) { return ::sinf(x); }
@@ -1354,16 +1636,15 @@ double sin(const double &x) { return ::sin(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T tan(const T &x) {
- EIGEN_USING_STD_MATH(tan);
+ EIGEN_USING_STD(tan);
return tan(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float tan(float x) { return cl::sycl::tan(x); }
-EIGEN_ALWAYS_INLINE double tan(double x) { return cl::sycl::tan(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tan, tan)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tan(const float &x) { return ::tanf(x); }
@@ -1374,7 +1655,7 @@ double tan(const double &x) { return ::tan(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T acos(const T &x) {
- EIGEN_USING_STD_MATH(acos);
+ EIGEN_USING_STD(acos);
return acos(x);
}
@@ -1382,19 +1663,17 @@ T acos(const T &x) {
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T acosh(const T &x) {
- EIGEN_USING_STD_MATH(acosh);
- return acosh(x);
+ EIGEN_USING_STD(acosh);
+ return static_cast<T>(acosh(x));
}
#endif
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float acos(float x) { return cl::sycl::acos(x); }
-EIGEN_ALWAYS_INLINE double acos(double x) { return cl::sycl::acos(x); }
-EIGEN_ALWAYS_INLINE float acosh(float x) { return cl::sycl::acosh(x); }
-EIGEN_ALWAYS_INLINE double acosh(double x) { return cl::sycl::acosh(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acos, acos)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acosh, acosh)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float acos(const float &x) { return ::acosf(x); }
@@ -1405,7 +1684,7 @@ double acos(const double &x) { return ::acos(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T asin(const T &x) {
- EIGEN_USING_STD_MATH(asin);
+ EIGEN_USING_STD(asin);
return asin(x);
}
@@ -1413,19 +1692,17 @@ T asin(const T &x) {
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T asinh(const T &x) {
- EIGEN_USING_STD_MATH(asinh);
- return asinh(x);
+ EIGEN_USING_STD(asinh);
+ return static_cast<T>(asinh(x));
}
#endif
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float asin(float x) { return cl::sycl::asin(x); }
-EIGEN_ALWAYS_INLINE double asin(double x) { return cl::sycl::asin(x); }
-EIGEN_ALWAYS_INLINE float asinh(float x) { return cl::sycl::asinh(x); }
-EIGEN_ALWAYS_INLINE double asinh(double x) { return cl::sycl::asinh(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asin, asin)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asinh, asinh)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float asin(const float &x) { return ::asinf(x); }
@@ -1436,27 +1713,25 @@ double asin(const double &x) { return ::asin(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T atan(const T &x) {
- EIGEN_USING_STD_MATH(atan);
- return atan(x);
+ EIGEN_USING_STD(atan);
+ return static_cast<T>(atan(x));
}
#if EIGEN_HAS_CXX11_MATH
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T atanh(const T &x) {
- EIGEN_USING_STD_MATH(atanh);
- return atanh(x);
+ EIGEN_USING_STD(atanh);
+ return static_cast<T>(atanh(x));
}
#endif
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float atan(float x) { return cl::sycl::atan(x); }
-EIGEN_ALWAYS_INLINE double atan(double x) { return cl::sycl::atan(x); }
-EIGEN_ALWAYS_INLINE float atanh(float x) { return cl::sycl::atanh(x); }
-EIGEN_ALWAYS_INLINE double atanh(double x) { return cl::sycl::atanh(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atan, atan)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atanh, atanh)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float atan(const float &x) { return ::atanf(x); }
@@ -1468,16 +1743,15 @@ double atan(const double &x) { return ::atan(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T cosh(const T &x) {
- EIGEN_USING_STD_MATH(cosh);
- return cosh(x);
+ EIGEN_USING_STD(cosh);
+ return static_cast<T>(cosh(x));
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float cosh(float x) { return cl::sycl::cosh(x); }
-EIGEN_ALWAYS_INLINE double cosh(double x) { return cl::sycl::cosh(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cosh, cosh)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float cosh(const float &x) { return ::coshf(x); }
@@ -1488,16 +1762,15 @@ double cosh(const double &x) { return ::cosh(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T sinh(const T &x) {
- EIGEN_USING_STD_MATH(sinh);
- return sinh(x);
+ EIGEN_USING_STD(sinh);
+ return static_cast<T>(sinh(x));
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float sinh(float x) { return cl::sycl::sinh(x); }
-EIGEN_ALWAYS_INLINE double sinh(double x) { return cl::sycl::sinh(x); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sinh, sinh)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float sinh(const float &x) { return ::sinhf(x); }
@@ -1508,19 +1781,20 @@ double sinh(const double &x) { return ::sinh(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T tanh(const T &x) {
- EIGEN_USING_STD_MATH(tanh);
+ EIGEN_USING_STD(tanh);
return tanh(x);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float tanh(float x) { return cl::sycl::tanh(x); }
-EIGEN_ALWAYS_INLINE double tanh(double x) { return cl::sycl::tanh(x); }
-#elif (!defined(EIGEN_CUDACC)) && EIGEN_FAST_MATH
+#if (!defined(EIGEN_GPUCC)) && EIGEN_FAST_MATH && !defined(SYCL_DEVICE_ONLY)
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tanh(float x) { return internal::generic_fast_tanh_float(x); }
#endif
-#ifdef EIGEN_CUDACC
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tanh, tanh)
+#endif
+
+#if defined(EIGEN_GPUCC)
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tanh(const float &x) { return ::tanhf(x); }
@@ -1531,16 +1805,15 @@ double tanh(const double &x) { return ::tanh(x); }
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T fmod(const T& a, const T& b) {
- EIGEN_USING_STD_MATH(fmod);
+ EIGEN_USING_STD(fmod);
return fmod(a, b);
}
-#if defined(__SYCL_DEVICE_ONLY__)
-EIGEN_ALWAYS_INLINE float fmod(float x, float y) { return cl::sycl::fmod(x, y); }
-EIGEN_ALWAYS_INLINE double fmod(double x, double y) { return cl::sycl::fmod(x, y); }
-#endif // defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
+SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(fmod, fmod)
+#endif
-#ifdef EIGEN_CUDACC
+#if defined(EIGEN_GPUCC)
template <>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float fmod(const float& a, const float& b) {
@@ -1554,6 +1827,23 @@ double fmod(const double& a, const double& b) {
}
#endif
+#if defined(SYCL_DEVICE_ONLY)
+#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY
+#undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY
+#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY
+#undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY
+#undef SYCL_SPECIALIZE_INTEGER_TYPES_BINARY
+#undef SYCL_SPECIALIZE_INTEGER_TYPES_UNARY
+#undef SYCL_SPECIALIZE_FLOATING_TYPES_BINARY
+#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY
+#undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE
+#undef SYCL_SPECIALIZE_GEN_UNARY_FUNC
+#undef SYCL_SPECIALIZE_UNARY_FUNC
+#undef SYCL_SPECIALIZE_GEN1_BINARY_FUNC
+#undef SYCL_SPECIALIZE_GEN2_BINARY_FUNC
+#undef SYCL_SPECIALIZE_BINARY_FUNC
+#endif
+
} // end namespace numext
namespace internal {
@@ -1677,6 +1967,11 @@ template<> struct random_impl<bool>
{
return random<int>(0,1)==0 ? false : true;
}
+
+ static inline bool run(const bool& a, const bool& b)
+ {
+ return random<int>(a, b)==0 ? false : true;
+ }
};
template<> struct scalar_fuzzy_impl<bool>
@@ -1703,6 +1998,57 @@ template<> struct scalar_fuzzy_impl<bool>
};
+} // end namespace internal
+
+// Default implementations that rely on other numext implementations
+namespace internal {
+
+// Specialization for complex types that are not supported by std::expm1.
+template <typename RealScalar>
+struct expm1_impl<std::complex<RealScalar> > {
+ EIGEN_DEVICE_FUNC static inline std::complex<RealScalar> run(
+ const std::complex<RealScalar>& x) {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar)
+ RealScalar xr = x.real();
+ RealScalar xi = x.imag();
+ // expm1(z) = exp(z) - 1
+ // = exp(x + i * y) - 1
+ // = exp(x) * (cos(y) + i * sin(y)) - 1
+ // = exp(x) * cos(y) - 1 + i * exp(x) * sin(y)
+ // Imag(expm1(z)) = exp(x) * sin(y)
+ // Real(expm1(z)) = exp(x) * cos(y) - 1
+ // = exp(x) * cos(y) - 1.
+ // = expm1(x) + exp(x) * (cos(y) - 1)
+ // = expm1(x) + exp(x) * (2 * sin(y / 2) ** 2)
+ RealScalar erm1 = numext::expm1<RealScalar>(xr);
+ RealScalar er = erm1 + RealScalar(1.);
+ RealScalar sin2 = numext::sin(xi / RealScalar(2.));
+ sin2 = sin2 * sin2;
+ RealScalar s = numext::sin(xi);
+ RealScalar real_part = erm1 - RealScalar(2.) * er * sin2;
+ return std::complex<RealScalar>(real_part, er * s);
+ }
+};
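The sin^2(y/2) rewrite in the comment above exists to dodge the catastrophic cancellation a naive exp(z) - 1 suffers for small |z|. A scalar sketch of the effect being avoided (standard library only):

#include <cmath>
#include <cstdio>
int main() {
  double x = 1e-12;
  std::printf("%.17g\n", std::exp(x) - 1.0); // approx 1.00008890058234e-12: noise from cancellation
  std::printf("%.17g\n", std::expm1(x));     // approx 1e-12: accurate
}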
+
+template<typename T>
+struct rsqrt_impl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_ALWAYS_INLINE T run(const T& x) {
+ return T(1)/numext::sqrt(x);
+ }
+};
+
+#if defined(EIGEN_GPU_COMPILE_PHASE)
+template<typename T>
+struct conj_impl<std::complex<T>, true>
+{
+ EIGEN_DEVICE_FUNC
+ static inline std::complex<T> run(const std::complex<T>& x)
+ {
+ return std::complex<T>(numext::real(x), -numext::imag(x));
+ }
+};
+#endif
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctionsImpl.h b/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctionsImpl.h
index ae1386b4c..4eaaaa784 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctionsImpl.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/MathFunctionsImpl.h
@@ -17,19 +17,28 @@ namespace internal {
/** \internal \returns the hyperbolic tan of \a a (coeff-wise)
Doesn't do anything fancy, just a 13/6-degree rational interpolant which
- is accurate up to a couple of ulp in the range [-9, 9], outside of which
- the tanh(x) = +/-1.
+ is accurate up to a couple of ulps in the (approximate) range [-8, 8],
+ outside of which tanh(x) = +/-1 in single precision. The input is clamped
+ to the range [-c, c]. The value c is chosen as the smallest value where
+ the approximation evaluates to exactly 1. In the range [-0.0004, 0.0004]
+ the approximation tanh(x) ~= x is used for better accuracy as x tends to zero.
This implementation works on both scalars and packets.
*/
template<typename T>
T generic_fast_tanh_float(const T& a_x)
{
- // Clamp the inputs to the range [-9, 9] since anything outside
- // this range is +/-1.0f in single-precision.
- const T plus_9 = pset1<T>(9.f);
- const T minus_9 = pset1<T>(-9.f);
- const T x = pmax(pmin(a_x, plus_9), minus_9);
+ // Clamp the inputs to the range [-c, c]
+#ifdef EIGEN_VECTORIZE_FMA
+ const T plus_clamp = pset1<T>(7.99881172180175781f);
+ const T minus_clamp = pset1<T>(-7.99881172180175781f);
+#else
+ const T plus_clamp = pset1<T>(7.90531110763549805f);
+ const T minus_clamp = pset1<T>(-7.90531110763549805f);
+#endif
+ const T tiny = pset1<T>(0.0004f);
+ const T x = pmax(pmin(a_x, plus_clamp), minus_clamp);
+ const T tiny_mask = pcmp_lt(pabs(a_x), tiny);
// The monomial coefficients of the numerator polynomial (odd).
const T alpha_1 = pset1<T>(4.89352455891786e-03f);
const T alpha_3 = pset1<T>(6.37261928875436e-04f);
@@ -57,13 +66,131 @@ T generic_fast_tanh_float(const T& a_x)
p = pmadd(x2, p, alpha_1);
p = pmul(x, p);
- // Evaluate the denominator polynomial p.
+ // Evaluate the denominator polynomial q.
T q = pmadd(x2, beta_6, beta_4);
q = pmadd(x2, q, beta_2);
q = pmadd(x2, q, beta_0);
// Divide the numerator by the denominator.
- return pdiv(p, q);
+ return pselect(tiny_mask, x, pdiv(p, q));
+}
+
+template<typename RealScalar>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y)
+{
+ // IEEE/IEC 60559 special cases.
+ if ((numext::isinf)(x) || (numext::isinf)(y))
+ return NumTraits<RealScalar>::infinity();
+ if ((numext::isnan)(x) || (numext::isnan)(y))
+ return NumTraits<RealScalar>::quiet_NaN();
+
+ EIGEN_USING_STD(sqrt);
+ RealScalar p, qp;
+ p = numext::maxi(x,y);
+ if(p==RealScalar(0)) return RealScalar(0);
+ qp = numext::mini(y,x) / p;
+ return p * sqrt(RealScalar(1) + qp*qp);
+}
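A sketch of the overflow that the rescaled formulation above avoids: squaring the operands directly overflows long before the true hypotenuse does, whereas p * sqrt(1 + (q/p)^2) keeps every intermediate in range.

#include <cmath>
#include <cstdio>
int main() {
  double x = 3e200, y = 4e200;                    // true hypot is 5e200, well within double range
  std::printf("%g\n", std::sqrt(x*x + y*y));      // inf: x*x already overflows
  double p = std::fmax(x, y), q = std::fmin(x, y) / p;
  std::printf("%g\n", p * std::sqrt(1.0 + q*q));  // 5e+200
}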
+
+template<typename Scalar>
+struct hypot_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static EIGEN_DEVICE_FUNC
+ inline RealScalar run(const Scalar& x, const Scalar& y)
+ {
+ EIGEN_USING_STD(abs);
+ return positive_real_hypot<RealScalar>(abs(x), abs(y));
+ }
+};
+
+// Generic complex sqrt implementation that correctly handles corner cases
+// according to https://en.cppreference.com/w/cpp/numeric/complex/sqrt
+template<typename T>
+EIGEN_DEVICE_FUNC std::complex<T> complex_sqrt(const std::complex<T>& z) {
+ // Computes the principal sqrt of the input.
+ //
+ // For the complex square root of the number x + i*y, we want to find real
+ // numbers u and v such that
+ // (u + i*v)^2 = x + i*y <=>
+ // u^2 - v^2 + i*2*u*v = x + i*y.
+ // By equating the real and imaginary parts we get:
+ // u^2 - v^2 = x
+ // 2*u*v = y.
+ //
+ // For x >= 0, this has the numerically stable solution
+ // u = sqrt(0.5 * (x + sqrt(x^2 + y^2)))
+ // v = y / (2 * u)
+ // and for x < 0,
+ // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2)))
+ // u = y / (2 * v)
+ //
+ // Letting w = sqrt(0.5 * (|x| + |z|)),
+ // if x == 0: u = w, v = sign(y) * w
+ // if x > 0: u = w, v = y / (2 * w)
+ // if x < 0: u = |y| / (2 * w), v = sign(y) * w
+
+ const T x = numext::real(z);
+ const T y = numext::imag(z);
+ const T zero = T(0);
+ const T w = numext::sqrt(T(0.5) * (numext::abs(x) + numext::hypot(x, y)));
+
+ return
+ (numext::isinf)(y) ? std::complex<T>(NumTraits<T>::infinity(), y)
+ : x == zero ? std::complex<T>(w, y < zero ? -w : w)
+ : x > zero ? std::complex<T>(w, y / (2 * w))
+ : std::complex<T>(numext::abs(y) / (2 * w), y < zero ? -w : w );
+}
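Usage sketch: the branches above reproduce the principal square root, so a negative real input lands on the positive imaginary axis, in agreement with std::sqrt on std::complex:

#include <complex>
#include <cstdio>
int main() {
  std::complex<double> r = std::sqrt(std::complex<double>(-4.0, 0.0));
  std::printf("%f %f\n", r.real(), r.imag()); // 0.000000 2.000000
}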
+
+// Generic complex rsqrt implementation.
+template<typename T>
+EIGEN_DEVICE_FUNC std::complex<T> complex_rsqrt(const std::complex<T>& z) {
+ // Computes the principal reciprocal sqrt of the input.
+ //
+ // For the complex reciprocal square root of the number z = x + i*y, we want to
+ // find real numbers u and v such that
+ // (u + i*v)^2 = 1 / (x + i*y) <=>
+ // u^2 - v^2 + i*2*u*v = x/|z|^2 - i*y/|z|^2.
+ // By equating the real and imaginary parts we get:
+ // u^2 - v^2 = x/|z|^2
+ // 2*u*v = -y/|z|^2.
+ //
+ // For x >= 0, this has the numerically stable solution
+ // u = sqrt(0.5 * (x + |z|)) / |z|
+ // v = -y / (2 * u * |z|^2)
+ // and for x < 0,
+ // v = -sign(y) * sqrt(0.5 * (-x + |z|)) / |z|
+ // u = -y / (2 * v * |z|^2)
+ //
+ // Letting w = sqrt(0.5 * (|x| + |z|)),
+ // if x == 0: u = w / |z|, v = -sign(y) * w / |z|
+ // if x > 0: u = w / |z|, v = -y / (2 * w * |z|)
+ // if x < 0: u = |y| / (2 * w * |z|), v = -sign(y) * w / |z|
+
+ const T x = numext::real(z);
+ const T y = numext::imag(z);
+ const T zero = T(0);
+
+ const T abs_z = numext::hypot(x, y);
+ const T w = numext::sqrt(T(0.5) * (numext::abs(x) + abs_z));
+ const T woz = w / abs_z;
+ // Corner cases consistent with 1/sqrt(z) on gcc/clang.
+ return
+ abs_z == zero ? std::complex<T>(NumTraits<T>::infinity(), NumTraits<T>::quiet_NaN())
+ : ((numext::isinf)(x) || (numext::isinf)(y)) ? std::complex<T>(zero, zero)
+ : x == zero ? std::complex<T>(woz, y < zero ? woz : -woz)
+ : x > zero ? std::complex<T>(woz, -y / (2 * w * abs_z))
+ : std::complex<T>(numext::abs(y) / (2 * w * abs_z), y < zero ? woz : -woz );
+}
+
+template<typename T>
+EIGEN_DEVICE_FUNC std::complex<T> complex_log(const std::complex<T>& z) {
+ // Computes complex log.
+ T a = numext::abs(z);
+ EIGEN_USING_STD(atan2);
+ T b = atan2(z.imag(), z.real());
+ return std::complex<T>(numext::log(a), b);
}
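complex_log above is the textbook identity log(z) = log|z| + i*atan2(Im z, Re z); for example, log(i) = i*pi/2. A host-side check with the standard library:

#include <complex>
#include <cstdio>
int main() {
  std::complex<double> l = std::log(std::complex<double>(0.0, 1.0));
  std::printf("%f %f\n", l.real(), l.imag()); // 0.000000 1.570796
}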
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Matrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/Matrix.h
index 90c336d8c..f0e59a911 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Matrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Matrix.h
@@ -29,7 +29,7 @@ private:
required_alignment = unpacket_traits<PacketScalar>::alignment,
packet_access_bit = (packet_traits<_Scalar>::Vectorizable && (EIGEN_UNALIGNED_VECTORIZE || (actual_alignment>=required_alignment))) ? PacketAccessBit : 0
};
-
+
public:
typedef _Scalar Scalar;
typedef Dense StorageKind;
@@ -44,7 +44,7 @@ public:
Options = _Options,
InnerStrideAtCompileTime = 1,
OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime,
-
+
// FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase
EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit,
Alignment = actual_alignment
@@ -255,55 +255,93 @@ class Matrix
*
* \sa resize(Index,Index)
*/
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Matrix() : Base()
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Matrix() : Base()
{
Base::_check_template_params();
EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
// FIXME is it still needed
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit Matrix(internal::constructor_without_unaligned_array_assert)
: Base(internal::constructor_without_unaligned_array_assert())
{ Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
#if EIGEN_HAS_RVALUE_REFERENCES
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Matrix(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)
: Base(std::move(other))
{
Base::_check_template_params();
- if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
- Base::_set_noalias(other);
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
{
- other.swap(*this);
+ Base::operator=(std::move(other));
return *this;
}
#endif
- #ifndef EIGEN_PARSED_BY_DOXYGEN
+#if EIGEN_HAS_CXX11
+ /** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&... args)
+ *
+ * Example: \include Matrix_variadic_ctor_cxx11.cpp
+ * Output: \verbinclude Matrix_variadic_ctor_cxx11.out
+ *
+ * \sa Matrix(const std::initializer_list<std::initializer_list<Scalar>>&)
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ : Base(a0, a1, a2, a3, args...) {}
+
+ /** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11
+ *
+ * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
+ *
+ * Example: \include Matrix_initializer_list_23_cxx11.cpp
+ * Output: \verbinclude Matrix_initializer_list_23_cxx11.out
+ *
+ * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered.
+ *
+ * In the case of a compile-time column vector, implicit transposition from a single row is allowed.
+ * Therefore <code>VectorXd{{1,2,3,4,5}}</code> is legal and the more verbose syntax
+ * <code>VectorXd{{1},{2},{3},{4},{5}}</code> can be avoided:
+ *
+ * Example: \include Matrix_initializer_list_vector_cxx11.cpp
+ * Output: \verbinclude Matrix_initializer_list_vector_cxx11.out
+ *
+ * In the case of fixed-sized matrices, the initializer list sizes must exactly match the matrix sizes,
+ * and implicit transposition is allowed for compile-time vectors only.
+ *
+ * \sa Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ */
+ EIGEN_DEVICE_FUNC
+ explicit EIGEN_STRONG_INLINE Matrix(const std::initializer_list<std::initializer_list<Scalar>>& list) : Base(list) {}
+#endif // end EIGEN_HAS_CXX11
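A usage sketch for the C++11 constructors documented above (assumes the Eigen headers from this tree and a C++11 compiler):

#include <Eigen/Dense>
int main() {
  Eigen::Matrix<double, 2, 3> m{{1.0, 2.0, 3.0},
                                {4.0, 5.0, 6.0}}; // one inner list per row
  Eigen::VectorXd v{{1.0, 2.0, 3.0, 4.0, 5.0}};   // implicit transposition for a column vector
  Eigen::Matrix<int, 5, 1> w(1, 2, 3, 4, 5);      // variadic fixed-size constructor
  return int(m.rows() + v.size() + w.sum());
}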
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
// This constructor is for both 1x1 matrices and dynamic vectors
template<typename T>
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE explicit Matrix(const T& x)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit Matrix(const T& x)
{
Base::_check_template_params();
Base::template _init1<T>(x);
}
template<typename T0, typename T1>
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Matrix(const T0& x, const T1& y)
{
Base::_check_template_params();
Base::template _init2<T0,T1>(x, y);
}
- #else
+
+
+#else
/** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */
EIGEN_DEVICE_FUNC
explicit Matrix(const Scalar *data);
@@ -313,7 +351,7 @@ class Matrix
* This is useful for dynamic-size vectors. For fixed-size vectors,
* it is redundant to pass these parameters, so one should use the default constructor
* Matrix() instead.
- *
+ *
* \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance,
* calling Matrix<double,1,1>(1) will call the initialization constructor: Matrix(const Scalar&).
* For fixed-size \c 1x1 matrices it is therefore recommended to use the default
@@ -321,14 +359,15 @@ class Matrix
* \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
*/
EIGEN_STRONG_INLINE explicit Matrix(Index dim);
- /** \brief Constructs an initialized 1x1 matrix with the given coefficient */
+ /** \brief Constructs an initialized 1x1 matrix with the given coefficient
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
Matrix(const Scalar& x);
/** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns.
*
* This is useful for dynamic-size matrices. For fixed-size matrices,
* it is redundant to pass these parameters, so one should use the default constructor
* Matrix() instead.
- *
+ *
* \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance,
* calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y).
* For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default
@@ -337,12 +376,15 @@ class Matrix
*/
EIGEN_DEVICE_FUNC
Matrix(Index rows, Index cols);
-
- /** \brief Constructs an initialized 2D vector with given coefficients */
+
+ /** \brief Constructs an initialized 2D vector with given coefficients
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
Matrix(const Scalar& x, const Scalar& y);
- #endif
+ #endif // end EIGEN_PARSED_BY_DOXYGEN
- /** \brief Constructs an initialized 3D vector with given coefficients */
+ /** \brief Constructs an initialized 3D vector with given coefficients
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z)
{
@@ -352,7 +394,9 @@ class Matrix
m_storage.data()[1] = y;
m_storage.data()[2] = z;
}
- /** \brief Constructs an initialized 4D vector with given coefficients */
+ /** \brief Constructs an initialized 4D vector with given coefficients
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
+ */
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
{
@@ -379,8 +423,10 @@ class Matrix
: Base(other.derived())
{ }
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return 1; }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); }
/////////// Geometry module ///////////
@@ -407,7 +453,7 @@ class Matrix
*
* \ingroup Core_Module
*
- * Eigen defines several typedef shortcuts for most common matrix and vector types.
+ * %Eigen defines several typedef shortcuts for most common matrix and vector types.
*
* The general patterns are the following:
*
@@ -420,6 +466,15 @@ class Matrix
* There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
* a fixed-size vector of 4 complex floats.
*
+ * With \cpp11, template aliases are also defined for common sizes.
+ * They follow the same pattern as above except that the scalar type suffix is replaced by a
+ * template parameter, i.e.:
+ * - `MatrixSize<Type>` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size.
+ * - `MatrixXSize<Type>` and `MatrixSizeX<Type>` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices.
+ * - `VectorSize<Type>` and `RowVectorSize<Type>` for column and row vectors.
+ *
+ * With \cpp11, you can also use fully generic column and row vector types: `Vector<Type,Size>` and `RowVector<Type,Size>`.
+ *
* \sa class Matrix
*/
@@ -456,6 +511,55 @@ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
#undef EIGEN_MAKE_TYPEDEFS
#undef EIGEN_MAKE_FIXED_TYPEDEFS
+#if EIGEN_HAS_CXX11
+
+#define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Matrix##SizeSuffix = Matrix<Type, Size, Size>; \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Vector##SizeSuffix = Matrix<Type, Size, 1>; \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using RowVector##SizeSuffix = Matrix<Type, 1, Size>;
+
+#define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Matrix##Size##X = Matrix<Type, Size, Dynamic>; \
+/** \ingroup matrixtypedefs */ \
+/** \brief \cpp11 */ \
+template <typename Type> \
+using Matrix##X##Size = Matrix<Type, Dynamic, Size>;
+
+EIGEN_MAKE_TYPEDEFS(2, 2)
+EIGEN_MAKE_TYPEDEFS(3, 3)
+EIGEN_MAKE_TYPEDEFS(4, 4)
+EIGEN_MAKE_TYPEDEFS(Dynamic, X)
+EIGEN_MAKE_FIXED_TYPEDEFS(2)
+EIGEN_MAKE_FIXED_TYPEDEFS(3)
+EIGEN_MAKE_FIXED_TYPEDEFS(4)
+
+/** \ingroup matrixtypedefs
+ * \brief \cpp11 */
+template <typename Type, int Size>
+using Vector = Matrix<Type, Size, 1>;
+
+/** \ingroup matrixtypedefs
+ * \brief \cpp11 */
+template <typename Type, int Size>
+using RowVector = Matrix<Type, 1, Size>;
+
+#undef EIGEN_MAKE_TYPEDEFS
+#undef EIGEN_MAKE_FIXED_TYPEDEFS
+
+#endif // EIGEN_HAS_CXX11
+
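A usage sketch for the alias templates defined above (C++11 or later):

#include <Eigen/Dense>
int main() {
  Eigen::Matrix3<float> A = Eigen::Matrix3<float>::Identity(); // same as Matrix3f
  Eigen::MatrixX3<double> B(5, 3);  // dynamic rows, three columns
  Eigen::Vector<double, 5> v;       // generic fixed-size column vector
  Eigen::RowVector<float, 4> r;     // generic fixed-size row vector
  return int(A.cols() + B.rows() + v.size() + r.size());
}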
} // end namespace Eigen
#endif // EIGEN_MATRIX_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/MatrixBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/MatrixBase.h
index 11435903b..45c3a596e 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/MatrixBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/MatrixBase.h
@@ -328,6 +328,7 @@ template<typename Derived> class MatrixBase
inline const PartialPivLU<PlainObject> lu() const;
+ EIGEN_DEVICE_FUNC
inline const Inverse<Derived> inverse() const;
template<typename ResultType>
@@ -337,12 +338,15 @@ template<typename Derived> class MatrixBase
bool& invertible,
const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
) const;
+
template<typename ResultType>
inline void computeInverseWithCheck(
ResultType& inverse,
bool& invertible,
const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
) const;
+
+ EIGEN_DEVICE_FUNC
Scalar determinant() const;
/////////// Cholesky module ///////////
@@ -414,15 +418,19 @@ template<typename Derived> class MatrixBase
////////// Householder module ///////////
+ EIGEN_DEVICE_FUNC
void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
template<typename EssentialPart>
+ EIGEN_DEVICE_FUNC
void makeHouseholder(EssentialPart& essential,
Scalar& tau, RealScalar& beta) const;
template<typename EssentialPart>
+ EIGEN_DEVICE_FUNC
void applyHouseholderOnTheLeft(const EssentialPart& essential,
const Scalar& tau,
Scalar* workspace);
template<typename EssentialPart>
+ EIGEN_DEVICE_FUNC
void applyHouseholderOnTheRight(const EssentialPart& essential,
const Scalar& tau,
Scalar* workspace);
@@ -448,19 +456,33 @@ template<typename Derived> class MatrixBase
///////// MatrixFunctions module /////////
typedef typename internal::stem_function<Scalar>::type StemFunction;
- const MatrixExponentialReturnValue<Derived> exp() const;
+#define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \
+ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the coefficient-wise Description use ArrayBase::##Name . */ \
+ const ReturnType<Derived> Name() const;
+#define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \
+ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the coefficient-wise Description use ArrayBase::##Name . */ \
+ const ReturnType<Derived> Name(Argument) const;
+
+ EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential)
+ /** \brief Helper function for the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>.*/
const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
- const MatrixFunctionReturnValue<Derived> cosh() const;
- const MatrixFunctionReturnValue<Derived> sinh() const;
- const MatrixFunctionReturnValue<Derived> cos() const;
- const MatrixFunctionReturnValue<Derived> sin() const;
- const MatrixSquareRootReturnValue<Derived> sqrt() const;
- const MatrixLogarithmReturnValue<Derived> log() const;
- const MatrixPowerReturnValue<Derived> pow(const RealScalar& p) const;
- const MatrixComplexPowerReturnValue<Derived> pow(const std::complex<RealScalar>& p) const;
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine)
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine)
+#if EIGEN_HAS_CXX11_MATH
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic tangent)
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine)
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine)
+#endif
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine)
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine)
+ EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root)
+ EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm)
+ EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p)
+ EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const std::complex<RealScalar>& p)
protected:
- EIGEN_DEVICE_FUNC MatrixBase() : Base() {}
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase)
private:
EIGEN_DEVICE_FUNC explicit MatrixBase(int);
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/NestByValue.h b/examples/ThirdPartyLibs/Eigen/src/Core/NestByValue.h
index 01cf192e9..b4275768a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/NestByValue.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/NestByValue.h
@@ -16,7 +16,11 @@ namespace Eigen {
namespace internal {
template<typename ExpressionType>
struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType>
-{};
+{
+ enum {
+ Flags = traits<ExpressionType>::Flags & ~NestByRefBit
+ };
+};
}
/** \class NestByValue
@@ -41,57 +45,13 @@ template<typename ExpressionType> class NestByValue
EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }
-
- EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const
- {
- return m_expression.coeff(row, col);
- }
-
- EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col)
- {
- return m_expression.const_cast_derived().coeffRef(row, col);
- }
-
- EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const
- {
- return m_expression.coeff(index);
- }
-
- EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index)
- {
- return m_expression.const_cast_derived().coeffRef(index);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index row, Index col) const
- {
- return m_expression.template packet<LoadMode>(row, col);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline void writePacket(Index row, Index col, const PacketScalar& x)
- {
- m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index index) const
- {
- return m_expression.template packet<LoadMode>(index);
- }
-
- template<int LoadMode>
- EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& x)
- {
- m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
- }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
+ EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; }
+
protected:
const ExpressionType m_expression;
};
@@ -105,6 +65,21 @@ DenseBase<Derived>::nestByValue() const
return NestByValue<Derived>(derived());
}
+namespace internal {
+
+// Evaluator of NestByValue -> simply forward to the evaluator of the nested expression
+template<typename ArgType>
+struct evaluator<NestByValue<ArgType> >
+ : public evaluator<ArgType>
+{
+ typedef evaluator<ArgType> Base;
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue<ArgType>& xpr)
+ : Base(xpr.nestedExpression())
+ {}
+};
+}
+
} // end namespace Eigen
#endif // EIGEN_NESTBYVALUE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/NoAlias.h b/examples/ThirdPartyLibs/Eigen/src/Core/NoAlias.h
index e94c8ee96..570283d90 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/NoAlias.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/NoAlias.h
@@ -75,10 +75,10 @@ class NoAlias
*
* More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag.
* Currently, even though several expressions may alias, only product
- * expressions have this flag. Therefore, noalias() is only usefull when
+ * expressions have this flag. Therefore, noalias() is only useful when
* the source expression contains a matrix product.
*
- * Here are some examples where noalias is usefull:
+ * Here are some examples where noalias is useful:
* \code
* D.noalias() = A * B;
* D.noalias() += A.transpose() * B;
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/NumTraits.h b/examples/ThirdPartyLibs/Eigen/src/Core/NumTraits.h
index 92a9ae1ea..72eac5a93 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/NumTraits.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/NumTraits.h
@@ -21,12 +21,14 @@ template< typename T,
bool is_integer = NumTraits<T>::IsInteger>
struct default_digits10_impl
{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static int run() { return std::numeric_limits<T>::digits10; }
};
template<typename T>
struct default_digits10_impl<T,false,false> // Floating point
{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static int run() {
using std::log10;
using std::ceil;
@@ -38,11 +40,64 @@ struct default_digits10_impl<T,false,false> // Floating point
template<typename T>
struct default_digits10_impl<T,false,true> // Integer
{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static int run() { return 0; }
+};
+
+
+// default implementation of digits(), based on numeric_limits if specialized,
+// 0 for integer types, and log2(epsilon()) otherwise.
+template< typename T,
+ bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
+ bool is_integer = NumTraits<T>::IsInteger>
+struct default_digits_impl
+{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static int run() { return std::numeric_limits<T>::digits; }
+};
+
+template<typename T>
+struct default_digits_impl<T,false,false> // Floating point
+{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static int run() {
+ using std::log;
+ using std::ceil;
+ typedef typename NumTraits<T>::Real Real;
+ return int(ceil(-log(NumTraits<Real>::epsilon())/log(static_cast<Real>(2))));
+ }
+};
+
+template<typename T>
+struct default_digits_impl<T,false,true> // Integer
+{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static int run() { return 0; }
};
} // end namespace internal
+namespace numext {
+/** \internal bit-wise cast that preserves the underlying bit representation. */
+
+// TODO: Replace by std::bit_cast (available in C++20)
+template <typename Tgt, typename Src>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) {
+#if EIGEN_HAS_TYPE_TRAITS
+ // The behaviour of memcpy is not specified for non-trivially copyable types
+ EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Src>::value, THIS_TYPE_IS_NOT_SUPPORTED);
+ EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Tgt>::value && std::is_default_constructible<Tgt>::value,
+ THIS_TYPE_IS_NOT_SUPPORTED);
+#endif
+
+ EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED);
+ Tgt tgt;
+ EIGEN_USING_STD(memcpy)
+ memcpy(&tgt, &src, sizeof(Tgt));
+ return tgt;
+}
+} // namespace numext
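Usage sketch for numext::bit_cast above: it gives a well-defined way to reinterpret a value's bits, where pointer- or union-based type punning would be undefined behavior. A standalone stand-in with the same memcpy-based semantics:

#include <cstdint>
#include <cstdio>
#include <cstring>
template <typename Tgt, typename Src>
Tgt bit_cast(const Src& src) {           // same shape as the numext version
  static_assert(sizeof(Src) == sizeof(Tgt), "sizes must match");
  Tgt tgt;
  std::memcpy(&tgt, &src, sizeof(Tgt));  // defined for trivially copyable types
  return tgt;
}
int main() {
  std::printf("0x%08X\n", bit_cast<std::uint32_t>(1.0f)); // 0x3F800000
}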
+
/** \class NumTraits
* \ingroup Core_Module
*
@@ -80,9 +135,18 @@ struct default_digits10_impl<T,false,true> // Integer
* \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default
* value by the fuzzy comparison operators.
* \li highest() and lowest() functions returning the highest and lowest possible values respectively.
+ * \li digits() function returning the number of radix digits (non-sign digits for integers, mantissa for floating-point). This is
+ * the analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits">std::numeric_limits<T>::digits</a>
+ * which is used as the default implementation if specialized.
* \li digits10() function returning the number of decimal digits that can be represented without change. This is
* the analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits10">std::numeric_limits<T>::digits10</a>
* which is used as the default implementation if specialized.
+ * \li min_exponent() and max_exponent() functions returning the highest and lowest possible values, respectively,
+ * such that the radix raised to the power exponent-1 is a normalized floating-point number. These are equivalent to
+ * <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/min_exponent">std::numeric_limits<T>::min_exponent</a>/
+ * <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/max_exponent">std::numeric_limits<T>::max_exponent</a>.
+ * \li infinity() function returning a representation of positive infinity, if available.
+ * \li quiet_NaN() function returning a non-signaling "not-a-number", if available.
*/
template<typename T> struct GenericNumTraits
@@ -106,42 +170,60 @@ template<typename T> struct GenericNumTraits
typedef T Nested;
typedef T Literal;
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline Real epsilon()
{
return numext::numeric_limits<T>::epsilon();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline int digits10()
{
return internal::default_digits10_impl<T>::run();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static inline int digits()
+ {
+ return internal::default_digits_impl<T>::run();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static inline int min_exponent()
+ {
+ return numext::numeric_limits<T>::min_exponent;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static inline int max_exponent()
+ {
+ return numext::numeric_limits<T>::max_exponent;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline Real dummy_precision()
{
// make sure to override this for floating-point types
return Real(0);
}
-
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline T highest() {
return (numext::numeric_limits<T>::max)();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline T lowest() {
- return IsInteger ? (numext::numeric_limits<T>::min)() : (-(numext::numeric_limits<T>::max)());
+ return IsInteger ? (numext::numeric_limits<T>::min)()
+ : static_cast<T>(-(numext::numeric_limits<T>::max)());
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline T infinity() {
return numext::numeric_limits<T>::infinity();
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline T quiet_NaN() {
return numext::numeric_limits<T>::quiet_NaN();
}
@@ -153,19 +235,20 @@ template<typename T> struct NumTraits : GenericNumTraits<T>
template<> struct NumTraits<float>
: GenericNumTraits<float>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline float dummy_precision() { return 1e-5f; }
};
template<> struct NumTraits<double> : GenericNumTraits<double>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline double dummy_precision() { return 1e-12; }
};
template<> struct NumTraits<long double>
: GenericNumTraits<long double>
{
+ EIGEN_CONSTEXPR
static inline long double dummy_precision() { return 1e-15l; }
};
@@ -182,11 +265,11 @@ template<typename _Real> struct NumTraits<std::complex<_Real> >
MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
};
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline Real epsilon() { return NumTraits<Real>::epsilon(); }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline int digits10() { return NumTraits<Real>::digits10(); }
};
@@ -206,16 +289,17 @@ struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
IsInteger = NumTraits<Scalar>::IsInteger,
IsSigned = NumTraits<Scalar>::IsSigned,
RequireInitialization = 1,
- ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::ReadCost,
- AddCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost,
- MulCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost
+ ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::ReadCost),
+ AddCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::AddCost),
+ MulCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::MulCost)
};
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }
+ EIGEN_CONSTEXPR
static inline int digits10() { return NumTraits<Scalar>::digits10(); }
};
@@ -229,6 +313,7 @@ template<> struct NumTraits<std::string>
MulCost = HugeCost
};
+ EIGEN_CONSTEXPR
static inline int digits10() { return 0; }
private:
@@ -243,6 +328,8 @@ private:
// Empty specialization for void to allow template specialization based on NumTraits<T>::Real with T==void and SFINAE.
template<> struct NumTraits<void> {};
+template<> struct NumTraits<bool> : GenericNumTraits<bool> {};
+
} // end namespace Eigen
#endif // EIGEN_NUMTRAITS_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/PartialReduxEvaluator.h b/examples/ThirdPartyLibs/Eigen/src/Core/PartialReduxEvaluator.h
new file mode 100644
index 000000000..17c06f078
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/PartialReduxEvaluator.h
@@ -0,0 +1,237 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011-2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARTIALREDUX_H
+#define EIGEN_PARTIALREDUX_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+/***************************************************************************
+*
+* This file provides evaluators for partial reductions.
+* There are two modes:
+*
+* - scalar path: simply calls the respective function on the column or row.
+* -> nothing special here, all the tricky part is handled by the return
+* types of VectorwiseOp's members. They embed the functor calling the
+* respective DenseBase's member function.
+*
+* - vectorized path: implements packet-wise reductions followed by
+* some (optional) processing of the outcome, e.g., division by n for mean.
+*
+* For the vectorized path let's observe that the packet-size and outer-unrolling
+* are both decided by the assignment logic. So all we have to do is decide
+* on the inner unrolling.
+*
+* For the unrolling, we can reuse "internal::redux_vec_unroller" from Redux.h,
+* but we need to be careful to specify the correct increment.
+*
+***************************************************************************/
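[Editor's sketch] For orientation, these evaluators back the public colwise()/rowwise() reductions. A minimal usage sketch (standard Eigen API; whether the vectorized path is taken depends on the traits below):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXf m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  // colwise().sum() builds a PartialReduxExpr; its evaluator reduces each
  // column, and mean() adds the post-processing (division by n) noted above.
  Eigen::RowVector3f colSums = m.colwise().sum();
  Eigen::Vector2f rowMeans = m.rowwise().mean();
  std::cout << colSums << "\n" << rowMeans.transpose() << "\n";
}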
+
+
+/* logic deciding a strategy for unrolling of vectorized paths */
+template<typename Func, typename Evaluator>
+struct packetwise_redux_traits
+{
+ enum {
+ OuterSize = int(Evaluator::IsRowMajor) ? Evaluator::RowsAtCompileTime : Evaluator::ColsAtCompileTime,
+ Cost = OuterSize == Dynamic ? HugeCost
+ : OuterSize * Evaluator::CoeffReadCost + (OuterSize-1) * functor_traits<Func>::Cost,
+ Unrolling = Cost <= EIGEN_UNROLLING_LIMIT ? CompleteUnrolling : NoUnrolling
+ };
+
+};
+
+/* Value to be returned when size==0; by default we return 0 */
+template<typename PacketType,typename Func>
+EIGEN_DEVICE_FUNC
+PacketType packetwise_redux_empty_value(const Func& ) {
+ const typename unpacket_traits<PacketType>::type zero(0);
+ return pset1<PacketType>(zero);
+}
+
+/* For products the default is 1 */
+template<typename PacketType,typename Scalar>
+EIGEN_DEVICE_FUNC
+PacketType packetwise_redux_empty_value(const scalar_product_op<Scalar,Scalar>& ) {
+ return pset1<PacketType>(Scalar(1));
+}
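[Editor's sketch] Observable consequence of these identity elements, assuming the usual sum/prod reduction members:

#include <Eigen/Dense>
#include <cassert>

int main() {
  Eigen::MatrixXd empty(0, 3); // zero rows, three columns
  // Reducing over the empty dimension yields the identity chosen above:
  // 0 for additive reductions, 1 for products.
  Eigen::RowVector3d s = empty.colwise().sum();
  Eigen::RowVector3d p = empty.colwise().prod();
  assert(s.isZero() && p.isOnes());
  return 0;
}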
+
+/* Perform the actual reduction */
+template<typename Func, typename Evaluator,
+ int Unrolling = packetwise_redux_traits<Func, Evaluator>::Unrolling
+>
+struct packetwise_redux_impl;
+
+/* Perform the actual reduction with unrolling */
+template<typename Func, typename Evaluator>
+struct packetwise_redux_impl<Func, Evaluator, CompleteUnrolling>
+{
+ typedef redux_novec_unroller<Func,Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
+ typedef typename Evaluator::Scalar Scalar;
+
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ PacketType run(const Evaluator &eval, const Func& func, Index /*size*/)
+ {
+ return redux_vec_unroller<Func, Evaluator, 0, packetwise_redux_traits<Func, Evaluator>::OuterSize>::template run<PacketType>(eval,func);
+ }
+};
+
+/* Add a specialization of redux_vec_unroller for size==0 at compile time.
+ * This specialization is not required for general reductions, which is
+ * why it is defined here.
+ */
+template<typename Func, typename Evaluator, int Start>
+struct redux_vec_unroller<Func, Evaluator, Start, 0>
+{
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE PacketType run(const Evaluator &, const Func& f)
+ {
+ return packetwise_redux_empty_value<PacketType>(f);
+ }
+};
+
+/* Perform the actual reduction for dynamic sizes */
+template<typename Func, typename Evaluator>
+struct packetwise_redux_impl<Func, Evaluator, NoUnrolling>
+{
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
+
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC
+ static PacketType run(const Evaluator &eval, const Func& func, Index size)
+ {
+ if(size==0)
+ return packetwise_redux_empty_value<PacketType>(func);
+
+ const Index size4 = (size-1)&(~3);
+ PacketType p = eval.template packetByOuterInner<Unaligned,PacketType>(0,0);
+ Index i = 1;
+ // This loop is optimized for instruction pipelining:
+ // - each iteration generates two independent instructions
+ // - thanks to branch prediction and out-of-order execution we have independent instructions across loop iterations
+ for(; i<size4; i+=4)
+ p = func.packetOp(p,
+ func.packetOp(
+ func.packetOp(eval.template packetByOuterInner<Unaligned,PacketType>(i+0,0),eval.template packetByOuterInner<Unaligned,PacketType>(i+1,0)),
+ func.packetOp(eval.template packetByOuterInner<Unaligned,PacketType>(i+2,0),eval.template packetByOuterInner<Unaligned,PacketType>(i+3,0))));
+ for(; i<size; ++i)
+ p = func.packetOp(p, eval.template packetByOuterInner<Unaligned,PacketType>(i,0));
+ return p;
+ }
+};
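[Editor's sketch] A scalar analogue of the scheduling idea in the loop above (hypothetical standalone function, not Eigen code): combining four elements per iteration as a short tree gives the CPU independent operations to overlap. Note that reassociation can slightly change floating-point results.

#include <cstddef>

double tree_sum(const double* x, std::size_t n) {
  if (n == 0) return 0.0;                           // identity for sums
  const std::size_t n4 = (n - 1) & ~std::size_t(3); // same bound as size4 above
  double acc = x[0];
  std::size_t i = 1;
  for (; i < n4; i += 4)                            // two independent adds per level
    acc += (x[i] + x[i + 1]) + (x[i + 2] + x[i + 3]);
  for (; i < n; ++i)                                // scalar tail
    acc += x[i];
  return acc;
}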
+
+template< typename ArgType, typename MemberOp, int Direction>
+struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
+ : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
+{
+ typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
+ typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
+ typedef typename internal::add_const_on_value_type<ArgTypeNested>::type ConstArgTypeNested;
+ typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
+ typedef typename ArgType::Scalar InputScalar;
+ typedef typename XprType::Scalar Scalar;
+ enum {
+ TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
+ };
+ typedef typename MemberOp::template Cost<int(TraversalSize)> CostOpType;
+ enum {
+ CoeffReadCost = TraversalSize==Dynamic ? HugeCost
+ : TraversalSize==0 ? 1
+ : int(TraversalSize) * int(evaluator<ArgType>::CoeffReadCost) + int(CostOpType::value),
+
+ _ArgFlags = evaluator<ArgType>::Flags,
+
+ _Vectorizable = bool(int(_ArgFlags)&PacketAccessBit)
+ && bool(MemberOp::Vectorizable)
+ && (Direction==int(Vertical) ? bool(_ArgFlags&RowMajorBit) : (_ArgFlags&RowMajorBit)==0)
+ && (TraversalSize!=0),
+
+ Flags = (traits<XprType>::Flags&RowMajorBit)
+ | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit)))
+ | (_Vectorizable ? PacketAccessBit : 0)
+ | LinearAccessBit,
+
+ Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
+ };
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)
+ : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : (TraversalSize==0 ? 1 : int(CostOpType::value)));
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar coeff(Index i, Index j) const
+ {
+ return coeff(Direction==Vertical ? j : i);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar coeff(Index index) const
+ {
+ return m_functor(m_arg.template subVector<DirectionType(Direction)>(index));
+ }
+
+ template<int LoadMode,typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PacketType packet(Index i, Index j) const
+ {
+ return packet<LoadMode,PacketType>(Direction==Vertical ? j : i);
+ }
+
+ template<int LoadMode,typename PacketType>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+ PacketType packet(Index idx) const
+ {
+ enum { PacketSize = internal::unpacket_traits<PacketType>::size };
+ typedef Block<const ArgTypeNestedCleaned,
+ Direction==Vertical ? int(ArgType::RowsAtCompileTime) : int(PacketSize),
+ Direction==Vertical ? int(PacketSize) : int(ArgType::ColsAtCompileTime),
+ true /* InnerPanel */> PanelType;
+
+ PanelType panel(m_arg,
+ Direction==Vertical ? 0 : idx,
+ Direction==Vertical ? idx : 0,
+ Direction==Vertical ? m_arg.rows() : Index(PacketSize),
+ Direction==Vertical ? Index(PacketSize) : m_arg.cols());
+
+ // FIXME
+ // See bug 1612: currently, if PacketSize==1 (i.e. complex<double> with 128-bit registers) then the storage order of the panel gets reversed
+ // and methods like packetByOuterInner no longer make sense in this context.
+ // So let's just bypass "vectorization" in this case:
+ if(PacketSize==1)
+ return internal::pset1<PacketType>(coeff(idx));
+
+ typedef typename internal::redux_evaluator<PanelType> PanelEvaluator;
+ PanelEvaluator panel_eval(panel);
+ typedef typename MemberOp::BinaryOp BinaryOp;
+ PacketType p = internal::packetwise_redux_impl<BinaryOp,PanelEvaluator>::template run<PacketType>(panel_eval,m_functor.binaryFunc(),m_arg.outerSize());
+ return p;
+ }
+
+protected:
+ ConstArgTypeNested m_arg;
+ const MemberOp m_functor;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIALREDUX_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/PermutationMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/PermutationMatrix.h
index b1fb455b9..69401bf41 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/PermutationMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/PermutationMatrix.h
@@ -87,25 +87,14 @@ class PermutationBase : public EigenBase<Derived>
return derived();
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is a special case of the templated operator=. Its purpose is to
- * prevent a default operator= from hiding the templated operator=.
- */
- Derived& operator=(const PermutationBase& other)
- {
- indices() = other.indices();
- return derived();
- }
- #endif
-
/** \returns the number of rows */
- inline Index rows() const { return Index(indices().size()); }
+ inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); }
/** \returns the number of columns */
- inline Index cols() const { return Index(indices().size()); }
+ inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); }
/** \returns the size of a side of the respective square matrix, i.e., the number of indices */
- inline Index size() const { return Index(indices().size()); }
+ inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename DenseDerived>
@@ -333,12 +322,6 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
inline PermutationMatrix(const PermutationBase<OtherDerived>& other)
: m_indices(other.indices()) {}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** Standard copy constructor. Defined only to prevent a default copy constructor
- * from hiding the other templated constructor */
- inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {}
- #endif
-
/** Generic constructor from expression of the indices. The indices
* array has the meaning that the permutations sends each integer i to indices[i].
*
@@ -373,17 +356,6 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
return Base::operator=(tr.derived());
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is a special case of the templated operator=. Its purpose is to
- * prevent a default operator= from hiding the templated operator=.
- */
- PermutationMatrix& operator=(const PermutationMatrix& other)
- {
- m_indices = other.m_indices;
- return *this;
- }
- #endif
-
/** const version of indices(). */
const IndicesType& indices() const { return m_indices; }
/** \returns a reference to the stored array representing the permutation. */
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/PlainObjectBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/PlainObjectBase.h
index 1dc7e223a..e2ddbd1d5 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/PlainObjectBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/PlainObjectBase.h
@@ -13,10 +13,10 @@
#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)
# define EIGEN_INITIALIZE_COEFFS
-# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(Index i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)
# define EIGEN_INITIALIZE_COEFFS
-# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(Index i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();
#else
# undef EIGEN_INITIALIZE_COEFFS
# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
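[Editor's sketch] These macros are opt-in debugging aids; the change above only widens the loop counter from int to Index. Usage sketch:

// Define before including any Eigen header (or via -D on the command line)
// to have every plain matrix/array coefficient start as quiet NaN.
#define EIGEN_INITIALIZE_MATRICES_BY_NAN
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m;            // coefficients are NaN, not garbage
  std::cout << m(0, 0) << "\n"; // prints nan, making stray reads obvious
}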
@@ -104,7 +104,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Scalar Scalar;
-
+
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Derived DenseType;
@@ -118,16 +118,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
using Base::IsVectorAtCompileTime;
using Base::Flags;
- template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
- friend class Eigen::Map<Derived, Unaligned>;
typedef Eigen::Map<Derived, Unaligned> MapType;
- friend class Eigen::Map<const Derived, Unaligned>;
typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;
-#if EIGEN_MAX_ALIGN_BYTES>0
- // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class twice.
- friend class Eigen::Map<Derived, AlignedMax>;
- friend class Eigen::Map<const Derived, AlignedMax>;
-#endif
typedef Eigen::Map<Derived, AlignedMax> AlignedMapType;
typedef const Eigen::Map<const Derived, AlignedMax> ConstAlignedMapType;
template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; };
@@ -147,10 +139,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_DEVICE_FUNC
const Base& base() const { return *static_cast<const Base*>(this); }
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
- EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_storage.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_storage.cols(); }
/** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index,Index) const
* provided to bypass the creation of an evaluator of the expression, thus saving compilation efforts.
@@ -358,7 +350,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* remain row-vectors and vectors remain vectors.
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
{
const OtherDerived& other = _other.derived();
@@ -383,7 +375,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
* conservativeResize(Index, NoChange_t).
*
- * Matrices are resized relative to the top-left element. In case values need to be
+ * Matrices are resized relative to the top-left element. In case values need to be
* appended to the matrix they will be uninitialized.
*/
EIGEN_DEVICE_FUNC
@@ -440,7 +432,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
* conservativeResize(Index, NoChange_t).
*
- * Matrices are resized relative to the top-left element. In case values need to be
+ * Matrices are resized relative to the top-left element. In case values need to be
* appended to the matrix they will copied from \c other.
*/
template<typename OtherDerived>
@@ -508,8 +500,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_DEVICE_FUNC
PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_storage, other.m_storage);
+ _check_template_params();
+ m_storage = std::move(other.m_storage);
return *this;
}
#endif
@@ -526,6 +518,71 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
// EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
+ #if EIGEN_HAS_CXX11
+ /** \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients. \cpp11
+ *
+ * \only_for_vectors
+ *
+ * This constructor is for 1D arrays or vectors with more than 4 coefficients.
+ * There exist C++98 analogue constructors for fixed-size arrays/vectors having 1, 2, 3, or 4 coefficients.
+ *
+ * \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
+ * constructor must match the fixed number of rows (resp. columns) of \c *this.
+ */
+ template <typename... ArgTypes>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
+ : m_storage()
+ {
+ _check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, sizeof...(args) + 4);
+ m_storage.data()[0] = a0;
+ m_storage.data()[1] = a1;
+ m_storage.data()[2] = a2;
+ m_storage.data()[3] = a3;
+ Index i = 4;
+ auto x = {(m_storage.data()[i++] = args, 0)...};
+ static_cast<void>(x);
+ }
+
+ /** \brief Constructs a Matrix or Array and initializes it with the elements given in an initializer list of initializer
+ * lists \cpp11
+ */
+ EIGEN_DEVICE_FUNC
+ explicit EIGEN_STRONG_INLINE PlainObjectBase(const std::initializer_list<std::initializer_list<Scalar>>& list)
+ : m_storage()
+ {
+ _check_template_params();
+
+ size_t list_size = 0;
+ if (list.begin() != list.end()) {
+ list_size = list.begin()->size();
+ }
+
+ // This is to allow syntax like VectorXi {{1, 2, 3, 4}}
+ if (ColsAtCompileTime == 1 && list.size() == 1) {
+ eigen_assert(list_size == static_cast<size_t>(RowsAtCompileTime) || RowsAtCompileTime == Dynamic);
+ resize(list_size, ColsAtCompileTime);
+ std::copy(list.begin()->begin(), list.begin()->end(), m_storage.data());
+ } else {
+ eigen_assert(list.size() == static_cast<size_t>(RowsAtCompileTime) || RowsAtCompileTime == Dynamic);
+ eigen_assert(list_size == static_cast<size_t>(ColsAtCompileTime) || ColsAtCompileTime == Dynamic);
+ resize(list.size(), list_size);
+
+ Index row_index = 0;
+ for (const std::initializer_list<Scalar>& row : list) {
+ eigen_assert(list_size == row.size());
+ Index col_index = 0;
+ for (const Scalar& e : row) {
+ coeffRef(row_index, col_index) = e;
+ ++col_index;
+ }
+ ++row_index;
+ }
+ }
+ }
+ #endif // end EIGEN_HAS_CXX11
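[Editor's sketch] Usage of the two constructors added above (C++11 and later):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Variadic constructor: a fixed-size vector from more than 4 coefficients.
  Eigen::Matrix<double, 6, 1> v(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
  // Initializer list of initializer lists: each inner list is one row.
  Eigen::MatrixXd m{{1.0, 2.0}, {3.0, 4.0}};
  // Single nested list on a compile-time vector, the special case noted above.
  Eigen::VectorXi u{{1, 2, 3, 4}};
  std::cout << v.transpose() << "\n" << m << "\n" << u.transpose() << "\n";
}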
+
/** \sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
@@ -564,7 +621,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)
{
_resize_to_match(other);
@@ -652,18 +709,26 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
using Base::setConstant;
EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val);
EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val);
+ EIGEN_DEVICE_FUNC Derived& setConstant(NoChange_t, Index cols, const Scalar& val);
+ EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, NoChange_t, const Scalar& val);
using Base::setZero;
EIGEN_DEVICE_FUNC Derived& setZero(Index size);
EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols);
+ EIGEN_DEVICE_FUNC Derived& setZero(NoChange_t, Index cols);
+ EIGEN_DEVICE_FUNC Derived& setZero(Index rows, NoChange_t);
using Base::setOnes;
EIGEN_DEVICE_FUNC Derived& setOnes(Index size);
EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols);
+ EIGEN_DEVICE_FUNC Derived& setOnes(NoChange_t, Index cols);
+ EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, NoChange_t);
using Base::setRandom;
Derived& setRandom(Index size);
Derived& setRandom(Index rows, Index cols);
+ Derived& setRandom(NoChange_t, Index cols);
+ Derived& setRandom(Index rows, NoChange_t);
#ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
#include EIGEN_PLAINOBJECTBASE_PLUGIN
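[Editor's sketch] The new NoChange_t overloads declared above resize one dimension and fill in a single call:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m(2, 2);
  m.setZero(Eigen::NoChange, 5);          // keep 2 rows, now 2x5, all zeros
  m.setConstant(4, Eigen::NoChange, 1.5); // keep 5 cols, now 4x5, all 1.5
  m.setRandom(Eigen::NoChange, 3);        // keep 4 rows, now 4x3, random
}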
@@ -678,7 +743,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* remain row-vectors and vectors remain vectors.
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)
{
#ifdef EIGEN_NO_AUTOMATIC_RESIZING
@@ -705,10 +770,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
*
* \internal
*/
- // aliasing is dealt once in internall::call_assignment
+ // aliasing is dealt with once in internal::call_assignment
  // so at this stage we have to assume aliasing... and resizing has to be done later.
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)
{
internal::call_assignment(this->derived(), other.derived());
@@ -721,7 +786,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* \sa operator=(const MatrixBase<OtherDerived>&), _set()
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
{
// I don't think we need this resize call since the lazyAssign will anyways resize
@@ -737,23 +802,25 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
{
- EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
- bool(NumTraits<T1>::IsInteger),
+ const bool t0_is_integer_alike = internal::is_valid_index_type<T0>::value;
+ const bool t1_is_integer_alike = internal::is_valid_index_type<T1>::value;
+ EIGEN_STATIC_ASSERT(t0_is_integer_alike &&
+ t1_is_integer_alike,
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
resize(rows,cols);
}
-
+
template<typename T0, typename T1>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
m_storage.data()[0] = Scalar(val0);
m_storage.data()[1] = Scalar(val1);
}
-
+
template<typename T0, typename T1>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init2(const Index& val0, const Index& val1,
typename internal::enable_if< (!internal::is_same<Index,Scalar>::value)
&& (internal::is_same<T0,Index>::value)
@@ -773,14 +840,14 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
&& ((!internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value || Base::SizeAtCompileTime==Dynamic)),T>::type* = 0)
{
// NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.
- const bool is_integer = NumTraits<T>::IsInteger;
- EIGEN_UNUSED_VARIABLE(is_integer);
- EIGEN_STATIC_ASSERT(is_integer,
+ const bool is_integer_alike = internal::is_valid_index_type<T>::value;
+ EIGEN_UNUSED_VARIABLE(is_integer_alike);
+ EIGEN_STATIC_ASSERT(is_integer_alike,
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
resize(size);
}
-
- // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitely converted)
+
+ // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitly converted)
template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0)
@@ -788,7 +855,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)
m_storage.data()[0] = val0;
}
-
+
// We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type match the index type)
template<typename T>
EIGEN_DEVICE_FUNC
@@ -844,7 +911,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
{
this->derived() = r;
}
-
+
// For fixed-size Array<Scalar,...>
template<typename T>
EIGEN_DEVICE_FUNC
@@ -856,7 +923,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
{
Base::setConstant(val0);
}
-
+
// For fixed-size Array<Index,...>
template<typename T>
EIGEN_DEVICE_FUNC
@@ -870,38 +937,38 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
{
Base::setConstant(val0);
}
-
+
template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
friend struct internal::matrix_swap_impl;
public:
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal
* \brief Override DenseBase::swap() since for dynamic-sized matrices
* of same type it is enough to swap the data pointers.
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(DenseBase<OtherDerived> & other)
{
enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.derived());
}
-
+
/** \internal
* \brief const version forwarded to DenseBase::swap
*/
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void swap(DenseBase<OtherDerived> const & other)
{ Base::swap(other.derived()); }
-
- EIGEN_DEVICE_FUNC
+
+ EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE void _check_template_params()
{
- EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor)
- && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0)
+ EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (int(Options)&RowMajor)==RowMajor)
+ && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (int(Options)&RowMajor)==0)
&& ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))
&& ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))
&& ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))
@@ -914,6 +981,17 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
enum { IsPlainObjectBase = 1 };
#endif
+ public:
+ // These apparently need to be down here for nvcc+icc to prevent duplicate
+ // Map symbol.
+ template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
+ friend class Eigen::Map<Derived, Unaligned>;
+ friend class Eigen::Map<const Derived, Unaligned>;
+#if EIGEN_MAX_ALIGN_BYTES>0
+ // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class twice.
+ friend class Eigen::Map<Derived, AlignedMax>;
+ friend class Eigen::Map<const Derived, AlignedMax>;
+#endif
};
namespace internal {
@@ -921,13 +999,19 @@ namespace internal {
template <typename Derived, typename OtherDerived, bool IsVector>
struct conservative_resize_like_impl
{
+ #if EIGEN_HAS_TYPE_TRAITS
+ static const bool IsRelocatable = std::is_trivially_copyable<typename Derived::Scalar>::value;
+ #else
+ static const bool IsRelocatable = !NumTraits<typename Derived::Scalar>::RequireInitialization;
+ #endif
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
{
if (_this.rows() == rows && _this.cols() == cols) return;
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
- if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
- (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns
+ if ( IsRelocatable
+ && (( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
+ (!Derived::IsRowMajor && _this.rows() == rows) )) // column-major and we change only the number of columns
{
internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
_this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
@@ -935,7 +1019,7 @@ struct conservative_resize_like_impl
else
{
// The storage order does not allow us to use reallocation.
- typename Derived::PlainObject tmp(rows,cols);
+ Derived tmp(rows,cols);
const Index common_rows = numext::mini(rows, _this.rows());
const Index common_cols = numext::mini(cols, _this.cols());
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
@@ -955,8 +1039,9 @@ struct conservative_resize_like_impl
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
- if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
- (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns
+ if ( IsRelocatable &&
+ (( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
+ (!Derived::IsRowMajor && _this.rows() == other.rows()) )) // column-major and we change only the number of columns
{
const Index new_rows = other.rows() - _this.rows();
const Index new_cols = other.cols() - _this.cols();
@@ -969,7 +1054,7 @@ struct conservative_resize_like_impl
else
{
// The storage order does not allow us to use reallocation.
- typename Derived::PlainObject tmp(other);
+ Derived tmp(other);
const Index common_rows = numext::mini(tmp.rows(), _this.rows());
const Index common_cols = numext::mini(tmp.cols(), _this.cols());
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
@@ -984,13 +1069,18 @@ template <typename Derived, typename OtherDerived>
struct conservative_resize_like_impl<Derived,OtherDerived,true>
: conservative_resize_like_impl<Derived,OtherDerived,false>
{
- using conservative_resize_like_impl<Derived,OtherDerived,false>::run;
-
+ typedef conservative_resize_like_impl<Derived,OtherDerived,false> Base;
+ using Base::run;
+ using Base::IsRelocatable;
+
static void run(DenseBase<Derived>& _this, Index size)
{
const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
- _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+ if(IsRelocatable)
+ _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+ else
+ Base::run(_this.derived(), new_rows, new_cols);
}
static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
@@ -1001,7 +1091,10 @@ struct conservative_resize_like_impl<Derived,OtherDerived,true>
const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
- _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+ if(IsRelocatable)
+ _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+ else
+ Base::run(_this.derived(), new_rows, new_cols);
if (num_new_elements > 0)
_this.tail(num_new_elements) = other.tail(num_new_elements);
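[Editor's sketch] The IsRelocatable guard above decides between in-place reallocation and the copy-through-temporary path; either way the user-visible contract is the same:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 2);
  m << 1, 2,
       3, 4;
  // Grow to 3x3: the overlapping top-left 2x2 block is preserved,
  // newly appended coefficients are left uninitialized.
  m.conservativeResize(3, 3);
  std::cout << m.topLeftCorner(2, 2) << "\n";
}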
@@ -1012,7 +1105,7 @@ template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
struct matrix_swap_impl
{
EIGEN_DEVICE_FUNC
- static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+ static EIGEN_STRONG_INLINE void run(MatrixTypeA& a, MatrixTypeB& b)
{
a.base().swap(b);
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Product.h b/examples/ThirdPartyLibs/Eigen/src/Core/Product.h
index ae0c94b38..70a6c1063 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Product.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Product.h
@@ -23,25 +23,25 @@ struct traits<Product<Lhs, Rhs, Option> >
typedef typename remove_all<Rhs>::type RhsCleaned;
typedef traits<LhsCleaned> LhsTraits;
typedef traits<RhsCleaned> RhsTraits;
-
+
typedef MatrixXpr XprKind;
-
+
typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,
typename RhsTraits::StorageKind,
internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;
typedef typename promote_index_type<typename LhsTraits::StorageIndex,
typename RhsTraits::StorageIndex>::type StorageIndex;
-
+
enum {
RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
ColsAtCompileTime = RhsTraits::ColsAtCompileTime,
MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
-
+
// FIXME: only needed by GeneralMatrixMatrixTriangular
InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
-
+
// The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit
: (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
@@ -74,10 +74,10 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option,
internal::product_type<_Lhs,_Rhs>::ret>::ret>
{
public:
-
+
typedef _Lhs Lhs;
typedef _Rhs Rhs;
-
+
typedef typename ProductImpl<
Lhs, Rhs, Option,
typename internal::product_promote_storage_type<typename internal::traits<Lhs>::StorageKind,
@@ -90,18 +90,23 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option,
typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
- EIGEN_DEVICE_FUNC Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
{
eigen_assert(lhs.cols() == rhs.rows()
&& "invalid matrix product"
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
- EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; }
- EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const LhsNestedCleaned& lhs() const { return m_lhs; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const RhsNestedCleaned& rhs() const { return m_rhs; }
protected:
@@ -110,13 +115,13 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option,
};
namespace internal {
-
+
template<typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs,Rhs>::ret>
class dense_product_base
: public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
{};
-/** Convertion to scalar for inner-products */
+/** Conversion to scalar for inner-products */
template<typename Lhs, typename Rhs, int Option>
class dense_product_base<Lhs, Rhs, Option, InnerProduct>
: public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
@@ -126,8 +131,8 @@ class dense_product_base<Lhs, Rhs, Option, InnerProduct>
public:
using Base::derived;
typedef typename Base::Scalar Scalar;
-
- operator const Scalar() const
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const
{
return internal::evaluator<ProductXpr>(derived()).coeff(0,0);
}
@@ -148,37 +153,37 @@ class ProductImpl<Lhs,Rhs,Option,Dense>
: public internal::dense_product_base<Lhs,Rhs,Option>
{
typedef Product<Lhs, Rhs, Option> Derived;
-
+
public:
-
+
typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
protected:
enum {
- IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
+ IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
(ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),
EnableCoeff = IsOneByOne || Option==LazyProduct
};
-
+
public:
-
- EIGEN_DEVICE_FUNC Scalar coeff(Index row, Index col) const
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const
{
EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
-
+
return internal::evaluator<Derived>(derived()).coeff(row,col);
}
- EIGEN_DEVICE_FUNC Scalar coeff(Index i) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const
{
EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );
-
+
return internal::evaluator<Derived>(derived()).coeff(i);
}
-
-
+
+
};
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ProductEvaluators.h b/examples/ThirdPartyLibs/Eigen/src/Core/ProductEvaluators.h
index 86966abdb..8cf294b28 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ProductEvaluators.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ProductEvaluators.h
@@ -14,27 +14,27 @@
#define EIGEN_PRODUCTEVALUATORS_H
namespace Eigen {
-
+
namespace internal {
/** \internal
* Evaluator of a product expression.
* Since products require special treatments to handle all possible cases,
- * we simply deffer the evaluation logic to a product_evaluator class
+ * we simply defer the evaluation logic to a product_evaluator class
* which offers more partial specialization possibilities.
- *
+ *
* \sa class product_evaluator
*/
template<typename Lhs, typename Rhs, int Options>
-struct evaluator<Product<Lhs, Rhs, Options> >
+struct evaluator<Product<Lhs, Rhs, Options> >
: public product_evaluator<Product<Lhs, Rhs, Options> >
{
typedef Product<Lhs, Rhs, Options> XprType;
typedef product_evaluator<XprType> Base;
-
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
-
+
// Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B"
// TODO we should apply that rule only if that's really helpful
template<typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1>
@@ -55,20 +55,20 @@ struct evaluator<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,
const Product<Lhs, Rhs, DefaultProduct> > XprType;
typedef evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1,Lhs,product), Rhs, DefaultProduct> > Base;
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
: Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs())
{}
};
template<typename Lhs, typename Rhs, int DiagIndex>
-struct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> >
+struct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> >
: public evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> >
{
typedef Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> XprType;
typedef evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> > Base;
-
- EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
: Base(Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>(
Product<Lhs, Rhs, LazyProduct>(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()),
xpr.index() ))
@@ -108,27 +108,27 @@ struct product_evaluator<Product<Lhs, Rhs, Options>, ProductTag, LhsShape, RhsSh
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
-
+
// FIXME shall we handle nested_eval here?,
// if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.)
// typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
// typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
// typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
// typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
-//
+//
// const LhsNested lhs(xpr.lhs());
// const RhsNested rhs(xpr.rhs());
-//
+//
// generic_product_impl<LhsNestedCleaned, RhsNestedCleaned>::evalTo(m_result, lhs, rhs);
generic_product_impl<Lhs, Rhs, LhsShape, RhsShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
-
-protected:
+
+protected:
PlainObject m_result;
};
-// The following three shortcuts are enabled only if the scalar types match excatly.
+// The following three shortcuts are enabled only if the scalar types match exactly.
// TODO: we could enable them for different scalar types when the product is not vectorized.
// Dense = Product
@@ -137,7 +137,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scal
typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
{
typedef Product<Lhs,Rhs,Options> SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
@@ -155,7 +155,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<
typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
{
typedef Product<Lhs,Rhs,Options> SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,Scalar> &)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
@@ -170,7 +170,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<
typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
{
typedef Product<Lhs,Rhs,Options> SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,Scalar> &)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
@@ -190,7 +190,7 @@ struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_product_op<ScalarBi
typedef CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>,
const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>,
const Product<Lhs,Rhs,DefaultProduct> > SrcXprType;
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func)
{
call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func);
@@ -217,7 +217,7 @@ template<typename DstXprType, typename OtherXpr, typename ProductType, typename
struct assignment_from_xpr_op_product
{
template<typename SrcXprType, typename InitialFunc>
- static EIGEN_STRONG_INLINE
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
{
call_assignment_no_alias(dst, src.lhs(), Func1());
@@ -246,19 +246,19 @@ template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
{
template<typename Dst>
- static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
}
-
+
template<typename Dst>
- static inline void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
dst.coeffRef(0,0) += (lhs.transpose().cwiseProduct(rhs)).sum();
}
-
+
template<typename Dst>
- static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); }
};
@@ -269,10 +269,10 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
// Column major result
template<typename Dst, typename Lhs, typename Rhs, typename Func>
-void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)
+void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)
{
evaluator<Rhs> rhsEval(rhs);
- typename nested_eval<Lhs,Rhs::SizeAtCompileTime>::type actual_lhs(lhs);
+ ei_declare_local_nested_eval(Lhs,lhs,Rhs::SizeAtCompileTime,actual_lhs);
// FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored
// FIXME not very good if rhs is real and lhs complex while alpha is real too
const Index cols = dst.cols();
@@ -282,10 +282,10 @@ void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const
// Row major result
template<typename Dst, typename Lhs, typename Rhs, typename Func>
-void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&)
+void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&)
{
evaluator<Lhs> lhsEval(lhs);
- typename nested_eval<Rhs,Lhs::SizeAtCompileTime>::type actual_rhs(rhs);
+ ei_declare_local_nested_eval(Rhs,rhs,Lhs::SizeAtCompileTime,actual_rhs);
// FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored
// FIXME not very good if lhs is real and rhs complex while alpha is real too
const Index rows = dst.rows();
@@ -298,43 +298,43 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,OuterProduct>
{
template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
// TODO it would be nice to be able to exploit our *_assign_op functors for that purpose
- struct set { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };
- struct add { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
- struct sub { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
+ struct set { template<typename Dst, typename Src> EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };
+ struct add { template<typename Dst, typename Src> EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
+ struct sub { template<typename Dst, typename Src> EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
struct adds {
Scalar m_scale;
explicit adds(const Scalar& s) : m_scale(s) {}
- template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const {
+ template<typename Dst, typename Src> void EIGEN_DEVICE_FUNC operator()(const Dst& dst, const Src& src) const {
dst.const_cast_derived() += m_scale * src;
}
};
-
+
template<typename Dst>
- static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major<Dst>());
}
-
+
template<typename Dst>
- static inline void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major<Dst>());
}
-
+
template<typename Dst>
- static inline void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major<Dst>());
}
-
+
template<typename Dst>
- static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major<Dst>());
}
-
+
};
@@ -343,21 +343,21 @@ template<typename Lhs, typename Rhs, typename Derived>
struct generic_product_impl_base
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
template<typename Dst>
- static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); }
template<typename Dst>
- static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ scaleAndAddTo(dst,lhs, rhs, Scalar(1)); }
template<typename Dst>
- static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{ scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); }
-
+
template<typename Dst>
- static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{ Derived::scaleAndAddTo(dst,lhs,rhs,alpha); }
};
@@ -373,8 +373,13 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct>
typedef typename internal::remove_all<typename internal::conditional<int(Side)==OnTheRight,LhsNested,RhsNested>::type>::type MatrixType;
template<typename Dest>
- static EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
+ // Fall back to an inner product if both the lhs and rhs are runtime vectors.
+ if (lhs.rows() == 1 && rhs.cols() == 1) {
+ dst.coeffRef(0,0) += alpha * lhs.row(0).conjugate().dot(rhs.col(0));
+ return;
+ }
LhsNested actual_lhs(lhs);
RhsNested actual_rhs(rhs);
internal::gemv_dense_selector<Side,
@@ -385,35 +390,84 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct>
};
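[Editor's sketch] Shape that reaches the new fallback, assuming the usual classification where matrix * compile-time-vector selects the GEMV path:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd a(1, 3); // a runtime row vector
  Eigen::VectorXd b(3);    // compile-time vector => GemvProduct
  a << 1, 2, 3;
  b << 4, 5, 6;
  Eigen::VectorXd c = a * b; // 1x1 result: taken by the inner-product fallback
  std::cout << c(0) << "\n"; // 32
}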
template<typename Lhs, typename Rhs>
-struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
+struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
template<typename Dst>
- static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// Same as: dst.noalias() = lhs.lazyProduct(rhs);
// but easier on the compiler side
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar,Scalar>());
}
-
+
template<typename Dst>
- static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// dst.noalias() += lhs.lazyProduct(rhs);
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>());
}
-
+
template<typename Dst>
- static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// dst.noalias() -= lhs.lazyProduct(rhs);
call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>());
}
-
-// template<typename Dst>
-// static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
-// { dst.noalias() += alpha * lhs.lazyProduct(rhs); }
+
+ // This is a special evaluation path called from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h
+ // This variant tries to extract scalar multiples from both the LHS and RHS and factor them out. For instance:
+ // dst {,+,-}= (s1*A)*(B*s2)
+ // will be rewritten as:
+ // dst {,+,-}= (s1*s2) * (A.lazyProduct(B))
+ // There are at least four benefits of doing so:
+ // 1 - huge performance gain for heap-allocated matrix types as it saves costly allocations.
+ // 2 - it is faster than simply bypassing the heap allocation through stack allocation.
+ // 3 - it makes this fallback consistent with the heavy GEMM routine.
+ // 4 - it fully bypasses huge stack allocation attempts when multiplying huge fixed-size matrices.
+ // (see https://stackoverflow.com/questions/54738495)
+ // For small fixed-size matrices, however, the gains are less obvious: it is sometimes 2x faster, but sometimes 3x slower,
+ // and the behavior also depends a lot on the compiler... This is why this rewriting strategy is currently
+ // enabled only when falling back from the main GEMM.
+ template<typename Dst, typename Func>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void eval_dynamic(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func &func)
+ {
+ enum {
+ HasScalarFactor = blas_traits<Lhs>::HasScalarFactor || blas_traits<Rhs>::HasScalarFactor,
+ ConjLhs = blas_traits<Lhs>::NeedToConjugate,
+ ConjRhs = blas_traits<Rhs>::NeedToConjugate
+ };
+ // FIXME: in c++11 this should be auto, and extractScalarFactor should also return auto
+ // this is important for real*complex_mat
+ Scalar actualAlpha = combine_scalar_factors<Scalar>(lhs, rhs);
+
+ eval_dynamic_impl(dst,
+ blas_traits<Lhs>::extract(lhs).template conjugateIf<ConjLhs>(),
+ blas_traits<Rhs>::extract(rhs).template conjugateIf<ConjRhs>(),
+ func,
+ actualAlpha,
+ typename conditional<HasScalarFactor,true_type,false_type>::type());
+ }
+
+protected:
+
+ template<typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s /* == 1 */, false_type)
+ {
+ EIGEN_UNUSED_VARIABLE(s);
+ eigen_internal_assert(s==Scalar(1));
+ call_restricted_packet_assignment_no_alias(dst, lhs.lazyProduct(rhs), func);
+ }
+
+ template<typename Dst, typename LhsT, typename RhsT, typename Func, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s, true_type)
+ {
+ call_restricted_packet_assignment_no_alias(dst, s * lhs.lazyProduct(rhs), func);
+ }
};
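[Editor's sketch] Conceptual illustration of the rewrite performed by eval_dynamic (the factoring itself is internal; this only shows the kind of expression it targets):

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 8);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(8, 8);
  const double s1 = 2.0, s2 = 3.0;
  Eigen::MatrixXd dst(8, 8);
  // For small run-time sizes the GEMM path may fall back to eval_dynamic,
  // which evaluates this roughly as dst = (s1 * s2) * A.lazyProduct(B),
  // so s1*A and B*s2 are never materialized.
  dst.noalias() = (s1 * A) * (B * s2);
}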
// This specialization enforces the use of a coefficient-based evaluation strategy
@@ -471,7 +525,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
-
+
typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
@@ -490,19 +544,19 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
typedef typename find_best_packet<Scalar,ColsAtCompileTime>::type RhsVecPacketType;
enum {
-
+
LhsCoeffReadCost = LhsEtorType::CoeffReadCost,
RhsCoeffReadCost = RhsEtorType::CoeffReadCost,
CoeffReadCost = InnerSize==0 ? NumTraits<Scalar>::ReadCost
: InnerSize == Dynamic ? HugeCost
- : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
+ : InnerSize * (NumTraits<Scalar>::MulCost + int(LhsCoeffReadCost) + int(RhsCoeffReadCost))
+ (InnerSize - 1) * NumTraits<Scalar>::AddCost,
Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
-
+
LhsFlags = LhsEtorType::Flags,
RhsFlags = RhsEtorType::Flags,
-
+
LhsRowMajor = LhsFlags & RowMajorBit,
RhsRowMajor = RhsFlags & RowMajorBit,
@@ -512,7 +566,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
// Here, we don't care about alignment larger than the usable packet size.
LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))),
RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))),
-
+
SameType = is_same<typename LhsNestedCleaned::Scalar,typename RhsNestedCleaned::Scalar>::value,
CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1),
@@ -522,12 +576,12 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: (bool(RhsRowMajor) && !CanVectorizeLhs),
- Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit)
+ Flags = ((int(LhsFlags) | int(RhsFlags)) & HereditaryBits & ~RowMajorBit)
| (EvalToRowMajor ? RowMajorBit : 0)
// TODO enable vectorization for mixed types
| (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0)
| (XprType::IsVectorAtCompileTime ? LinearAccessBit : 0),
-
+
LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)),
RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)),
@@ -543,10 +597,10 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
CanVectorizeInner = SameType
&& LhsRowMajor
&& (!RhsRowMajor)
- && (LhsFlags & RhsFlags & ActualPacketAccessBit)
- && (InnerSize % packet_traits<Scalar>::size == 0)
+ && (int(LhsFlags) & int(RhsFlags) & ActualPacketAccessBit)
+ && (int(InnerSize) % packet_traits<Scalar>::size == 0)
};
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
{
return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum();
@@ -556,7 +610,8 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
* which is why we don't set the LinearAccessBit.
* TODO: this seems possible when the result is a vector
*/
- EIGEN_DEVICE_FUNC const CoeffReturnType coeff(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const CoeffReturnType coeff(Index index) const
{
const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index;
const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0;
@@ -564,6 +619,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
}
template<int LoadMode, typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const PacketType packet(Index row, Index col) const
{
PacketType res;
@@ -575,6 +631,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
}
template<int LoadMode, typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const PacketType packet(Index index) const
{
const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index;
@@ -585,7 +642,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
protected:
typename internal::add_const_on_value_type<LhsNested>::type m_lhs;
typename internal::add_const_on_value_type<RhsNested>::type m_rhs;
-
+
LhsEtorType m_lhsImpl;
RhsEtorType m_rhsImpl;
@@ -603,7 +660,8 @@ struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProduc
enum {
Flags = Base::Flags | EvalBeforeNestingBit
};
- EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit product_evaluator(const XprType& xpr)
: Base(BaseProduct(xpr.lhs(),xpr.rhs()))
{}
};
@@ -615,7 +673,7 @@ struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProduc
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
{
etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
res = pmadd(pset1<Packet>(lhs.coeff(row, Index(UnrollingIndex-1))), rhs.template packet<LoadMode,Packet>(Index(UnrollingIndex-1), col), res);
@@ -625,7 +683,7 @@ struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, Load
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
{
etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
res = pmadd(lhs.template packet<LoadMode,Packet>(row, Index(UnrollingIndex-1)), pset1<Packet>(rhs.coeff(Index(UnrollingIndex-1), col)), res);
@@ -635,7 +693,7 @@ struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, Load
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
{
res = pmul(pset1<Packet>(lhs.coeff(row, Index(0))),rhs.template packet<LoadMode,Packet>(Index(0), col));
}
@@ -644,7 +702,7 @@ struct etor_product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
{
res = pmul(lhs.template packet<LoadMode,Packet>(row, Index(0)), pset1<Packet>(rhs.coeff(Index(0), col)));
}
@@ -653,7 +711,7 @@ struct etor_product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res)
{
res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
}
@@ -662,7 +720,7 @@ struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res)
{
res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
}
@@ -671,7 +729,7 @@ struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
{
res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
for(Index i = 0; i < innerDim; ++i)
@@ -682,7 +740,7 @@ struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
- static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
{
res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));
for(Index i = 0; i < innerDim; ++i)
@@ -704,7 +762,7 @@ struct generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
@@ -718,7 +776,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
@@ -739,9 +797,10 @@ struct generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
template<typename Dest>
- static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC
+ void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
selfadjoint_product_impl<typename Lhs::MatrixType,Lhs::Mode,false,Rhs,0,Rhs::IsVectorAtCompileTime>::run(dst, lhs.nestedExpression(), rhs, alpha);
}
@@ -752,7 +811,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
-
+
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
@@ -764,7 +823,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag>
/***************************************************************************
* Diagonal products
***************************************************************************/
-
+
template<typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder>
struct diagonal_product_evaluator_base
: evaluator_base<Derived>
@@ -772,34 +831,49 @@ struct diagonal_product_evaluator_base
typedef typename ScalarBinaryOpTraits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
public:
enum {
- CoeffReadCost = NumTraits<Scalar>::MulCost + evaluator<MatrixType>::CoeffReadCost + evaluator<DiagonalType>::CoeffReadCost,
-
+ CoeffReadCost = int(NumTraits<Scalar>::MulCost) + int(evaluator<MatrixType>::CoeffReadCost) + int(evaluator<DiagonalType>::CoeffReadCost),
+
MatrixFlags = evaluator<MatrixType>::Flags,
DiagFlags = evaluator<DiagonalType>::Flags,
- _StorageOrder = MatrixFlags & RowMajorBit ? RowMajor : ColMajor,
+
+ _StorageOrder = (Derived::MaxRowsAtCompileTime==1 && Derived::MaxColsAtCompileTime!=1) ? RowMajor
+ : (Derived::MaxColsAtCompileTime==1 && Derived::MaxRowsAtCompileTime!=1) ? ColMajor
+ : MatrixFlags & RowMajorBit ? RowMajor : ColMajor,
+ _SameStorageOrder = _StorageOrder == (MatrixFlags & RowMajorBit ? RowMajor : ColMajor),
+
_ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)
||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)),
_SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
// FIXME currently we need same types, but in the future the next rule should be the one
//_Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagFlags)&PacketAccessBit))),
- _Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))),
+ _Vectorizable = bool(int(MatrixFlags)&PacketAccessBit)
+ && _SameTypes
+ && (_SameStorageOrder || (MatrixFlags&LinearAccessBit)==LinearAccessBit)
+ && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))),
_LinearAccessMask = (MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1) ? LinearAccessBit : 0,
Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixFlags)) | (_Vectorizable ? PacketAccessBit : 0),
- Alignment = evaluator<MatrixType>::Alignment
+ Alignment = evaluator<MatrixType>::Alignment,
+
+ AsScalarProduct = (DiagonalType::SizeAtCompileTime==1)
+ || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft)
+ || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight)
};
-
- diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag)
+
+ EIGEN_DEVICE_FUNC diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag)
: m_diagImpl(diag), m_matImpl(mat)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
{
- return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);
+ if(AsScalarProduct)
+ return m_diagImpl.coeff(0) * m_matImpl.coeff(idx);
+ else
+ return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);
}
-
+
protected:
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const
@@ -807,7 +881,7 @@ protected:
return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),
internal::pset1<PacketType>(m_diagImpl.coeff(id)));
}
-
+
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const
{
@@ -818,7 +892,7 @@ protected:
return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),
m_diagImpl.template packet<DiagonalPacketLoadMode,PacketType>(id));
}
-
+
evaluator<DiagonalType> m_diagImpl;
evaluator<MatrixType> m_matImpl;
};
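The evaluator-base changes above do two things: _StorageOrder/_SameStorageOrder let more storage-order combinations vectorize, and AsScalarProduct detects when the "diagonal" degenerates to a single coefficient, in which case coeff() reads m_diagImpl.coeff(0), i.e. a plain scalar scaling. A short usage sketch of the public entry points these internals serve:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::Vector3d d(1.0, 2.0, 3.0);
        Eigen::Matrix3d m = Eigen::Matrix3d::Random();

        // Both products go through diagonal_product_evaluator_base;
        // no dense 3x3 diagonal matrix is ever materialized.
        Eigen::Matrix3d left  = d.asDiagonal() * m; // scales row i by d(i)
        Eigen::Matrix3d right = m * d.asDiagonal(); // scales column j by d(j)

        std::cout << left << "\n\n" << right << "\n";
    }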
@@ -833,25 +907,25 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
using Base::m_matImpl;
using Base::coeff;
typedef typename Base::Scalar Scalar;
-
+
typedef Product<Lhs, Rhs, ProductKind> XprType;
typedef typename XprType::PlainObject PlainObject;
-
- enum {
- StorageOrder = int(Rhs::Flags) & RowMajorBit ? RowMajor : ColMajor
- };
+ typedef typename Lhs::DiagonalVectorType DiagonalType;
+
+
+ enum { StorageOrder = Base::_StorageOrder };
EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
: Base(xpr.rhs(), xpr.lhs().diagonal())
{
}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col);
}
-
-#ifndef EIGEN_CUDACC
+
+#ifndef EIGEN_GPUCC
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
{
@@ -860,7 +934,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
return this->template packet_impl<LoadMode,PacketType>(row,col, row,
typename internal::conditional<int(StorageOrder)==RowMajor, internal::true_type, internal::false_type>::type());
}
-
+
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet(Index idx) const
{
@@ -879,30 +953,30 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape,
using Base::m_matImpl;
using Base::coeff;
typedef typename Base::Scalar Scalar;
-
+
typedef Product<Lhs, Rhs, ProductKind> XprType;
typedef typename XprType::PlainObject PlainObject;
-
- enum { StorageOrder = int(Lhs::Flags) & RowMajorBit ? RowMajor : ColMajor };
+
+ enum { StorageOrder = Base::_StorageOrder };
EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs().diagonal())
{
}
-
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col);
}
-
-#ifndef EIGEN_CUDACC
+
+#ifndef EIGEN_GPUCC
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
{
return this->template packet_impl<LoadMode,PacketType>(row,col, col,
typename internal::conditional<int(StorageOrder)==ColMajor, internal::true_type, internal::false_type>::type());
}
-
+
template<int LoadMode,typename PacketType>
EIGEN_STRONG_INLINE PacketType packet(Index idx) const
{
@@ -930,7 +1004,7 @@ struct permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape>
typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
template<typename Dest, typename PermutationType>
- static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
{
MatrixType mat(xpr);
const Index n = Side==OnTheLeft ? mat.rows() : mat.cols();
@@ -984,7 +1058,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, PermutationShape, MatrixShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
{
permutation_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs);
}
@@ -994,7 +1068,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, MatrixShape, PermutationShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
{
permutation_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs);
}
@@ -1004,7 +1078,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Inverse<Lhs>, Rhs, PermutationShape, MatrixShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Inverse<Lhs>& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Inverse<Lhs>& lhs, const Rhs& rhs)
{
permutation_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs);
}
@@ -1014,7 +1088,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Inverse<Rhs>, MatrixShape, PermutationShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const Inverse<Rhs>& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Inverse<Rhs>& rhs)
{
permutation_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs);
}
@@ -1036,9 +1110,9 @@ struct transposition_matrix_product
{
typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
-
+
template<typename Dest, typename TranspositionType>
- static inline void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr)
{
MatrixType mat(xpr);
typedef typename TranspositionType::StorageIndex StorageIndex;
@@ -1061,7 +1135,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
{
transposition_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs);
}
@@ -1071,7 +1145,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
{
transposition_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs);
}
@@ -1082,7 +1156,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Transpose<Lhs>, Rhs, TranspositionsShape, MatrixShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs)
{
transposition_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs);
}
@@ -1092,7 +1166,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
struct generic_product_impl<Lhs, Transpose<Rhs>, MatrixShape, TranspositionsShape, ProductTag>
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs)
{
transposition_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs);
}
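All of the permutation and transposition specializations above, including the Inverse<> and Transpose<> variants, feed the same public syntax. A usage sketch (Eigen public API, values illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::Matrix3d m = Eigen::Vector3d(1.0, 2.0, 3.0).asDiagonal();

        Eigen::PermutationMatrix<3> p;
        p.indices() << 2, 0, 1;

        Eigen::Matrix3d rows = p * m;              // permutes the rows of m
        Eigen::Matrix3d cols = m * p;              // permutes the columns of m
        Eigen::Matrix3d back = p.inverse() * rows; // Inverse<> overload above; back == m

        std::cout << rows << "\n\n" << cols << "\n\n" << back << "\n";
    }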
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Random.h b/examples/ThirdPartyLibs/Eigen/src/Core/Random.h
index 486e9ed52..dab2ac8e9 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Random.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Random.h
@@ -177,6 +177,42 @@ PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
return setRandom();
}
+/** Resizes to the given size, changing only the number of columns, and sets all
+ * coefficients in this expression to random values. For the parameter of type
+ * NoChange_t, just pass the special value \c NoChange.
+ *
+ * Numbers are uniformly spread through their whole definition range for integer types,
+ * and in the [-1:1] range for floating point scalar types.
+ *
+ * \not_reentrant
+ *
+ * \sa DenseBase::setRandom(), setRandom(Index), setRandom(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Random()
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(NoChange_t, Index cols)
+{
+ return setRandom(rows(), cols);
+}
+
+/** Resizes to the given size, changing only the number of rows, and sets all
+ * coefficients in this expression to random values. For the parameter of type
+ * NoChange_t, just pass the special value \c NoChange.
+ *
+ * Numbers are uniformly spread through their whole definition range for integer types,
+ * and in the [-1:1] range for floating point scalar types.
+ *
+ * \not_reentrant
+ *
+ * \sa DenseBase::setRandom(), setRandom(Index), setRandom(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Random()
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index rows, NoChange_t)
+{
+ return setRandom(rows, cols());
+}
+
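A quick usage sketch of the two overloads just added, alongside the existing setRandom(Index, Index):

    #include <Eigen/Dense>

    int main() {
        Eigen::MatrixXd m(4, 4);

        m.setRandom(Eigen::NoChange, 6); // keep 4 rows, resize to 6 columns
        m.setRandom(2, Eigen::NoChange); // resize to 2 rows, keep 6 columns

        return (m.rows() == 2 && m.cols() == 6) ? 0 : 1;
    }

As the \not_reentrant tag notes, these draw from shared random state, so they are not safe to call concurrently from multiple threads.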
} // end namespace Eigen
#endif // EIGEN_RANDOM_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Redux.h b/examples/ThirdPartyLibs/Eigen/src/Core/Redux.h
index 2b5b73bf7..b6790d110 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Redux.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Redux.h
@@ -23,23 +23,29 @@ namespace internal {
* Part 1 : the logic deciding a strategy for vectorization and unrolling
***************************************************************************/
-template<typename Func, typename Derived>
+template<typename Func, typename Evaluator>
struct redux_traits
{
public:
- typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType;
+ typedef typename find_best_packet<typename Evaluator::Scalar,Evaluator::SizeAtCompileTime>::type PacketType;
enum {
PacketSize = unpacket_traits<PacketType>::size,
- InnerMaxSize = int(Derived::IsRowMajor)
- ? Derived::MaxColsAtCompileTime
- : Derived::MaxRowsAtCompileTime
+ InnerMaxSize = int(Evaluator::IsRowMajor)
+ ? Evaluator::MaxColsAtCompileTime
+ : Evaluator::MaxRowsAtCompileTime,
+ OuterMaxSize = int(Evaluator::IsRowMajor)
+ ? Evaluator::MaxRowsAtCompileTime
+ : Evaluator::MaxColsAtCompileTime,
+ SliceVectorizedWork = int(InnerMaxSize)==Dynamic ? Dynamic
+ : int(OuterMaxSize)==Dynamic ? (int(InnerMaxSize)>=int(PacketSize) ? Dynamic : 0)
+ : (int(InnerMaxSize)/int(PacketSize)) * int(OuterMaxSize)
};
enum {
- MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
+ MightVectorize = (int(Evaluator::Flags)&ActualPacketAccessBit)
&& (functor_traits<Func>::PacketAccess),
- MayLinearVectorize = bool(MightVectorize) && (int(Derived::Flags)&LinearAccessBit),
- MaySliceVectorize = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize
+ MayLinearVectorize = bool(MightVectorize) && (int(Evaluator::Flags)&LinearAccessBit),
+ MaySliceVectorize = bool(MightVectorize) && (int(SliceVectorizedWork)==Dynamic || int(SliceVectorizedWork)>=3)
};
public:
@@ -51,8 +57,8 @@ public:
public:
enum {
- Cost = Derived::SizeAtCompileTime == Dynamic ? HugeCost
- : Derived::SizeAtCompileTime * Derived::CoeffReadCost + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
+ Cost = Evaluator::SizeAtCompileTime == Dynamic ? HugeCost
+ : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) + (Evaluator::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
};
@@ -64,18 +70,20 @@ public:
#ifdef EIGEN_DEBUG_ASSIGN
static void debug()
{
- std::cerr << "Xpr: " << typeid(typename Derived::XprType).name() << std::endl;
+ std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl;
std::cerr.setf(std::ios::hex, std::ios::basefield);
- EIGEN_DEBUG_VAR(Derived::Flags)
+ EIGEN_DEBUG_VAR(Evaluator::Flags)
std::cerr.unsetf(std::ios::hex);
EIGEN_DEBUG_VAR(InnerMaxSize)
+ EIGEN_DEBUG_VAR(OuterMaxSize)
+ EIGEN_DEBUG_VAR(SliceVectorizedWork)
EIGEN_DEBUG_VAR(PacketSize)
EIGEN_DEBUG_VAR(MightVectorize)
EIGEN_DEBUG_VAR(MayLinearVectorize)
EIGEN_DEBUG_VAR(MaySliceVectorize)
- EIGEN_DEBUG_VAR(Traversal)
+ std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
EIGEN_DEBUG_VAR(UnrollingLimit)
- EIGEN_DEBUG_VAR(Unrolling)
+ std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl;
std::cerr << std::endl;
}
#endif
@@ -87,88 +95,86 @@ public:
/*** no vectorization ***/
-template<typename Func, typename Derived, int Start, int Length>
+template<typename Func, typename Evaluator, int Start, int Length>
struct redux_novec_unroller
{
enum {
HalfLength = Length/2
};
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+ static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func)
{
- return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
- redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func));
+ return func(redux_novec_unroller<Func, Evaluator, Start, HalfLength>::run(eval,func),
+ redux_novec_unroller<Func, Evaluator, Start+HalfLength, Length-HalfLength>::run(eval,func));
}
};
-template<typename Func, typename Derived, int Start>
-struct redux_novec_unroller<Func, Derived, Start, 1>
+template<typename Func, typename Evaluator, int Start>
+struct redux_novec_unroller<Func, Evaluator, Start, 1>
{
enum {
- outer = Start / Derived::InnerSizeAtCompileTime,
- inner = Start % Derived::InnerSizeAtCompileTime
+ outer = Start / Evaluator::InnerSizeAtCompileTime,
+ inner = Start % Evaluator::InnerSizeAtCompileTime
};
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&)
+ static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func&)
{
- return mat.coeffByOuterInner(outer, inner);
+ return eval.coeffByOuterInner(outer, inner);
}
};
// This is actually dead code and will never be called. It is required
// to prevent false warnings regarding failed inlining though
// for 0 length run() will never be called at all.
-template<typename Func, typename Derived, int Start>
-struct redux_novec_unroller<Func, Derived, Start, 0>
+template<typename Func, typename Evaluator, int Start>
+struct redux_novec_unroller<Func, Evaluator, Start, 0>
{
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }
+ static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
};
/*** vectorization ***/
-template<typename Func, typename Derived, int Start, int Length>
+template<typename Func, typename Evaluator, int Start, int Length>
struct redux_vec_unroller
{
- enum {
- PacketSize = redux_traits<Func, Derived>::PacketSize,
- HalfLength = Length/2
- };
-
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
-
- static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func& func)
{
+ enum {
+ PacketSize = unpacket_traits<PacketType>::size,
+ HalfLength = Length/2
+ };
+
return func.packetOp(
- redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
- redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) );
+ redux_vec_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval,func),
+ redux_vec_unroller<Func, Evaluator, Start+HalfLength, Length-HalfLength>::template run<PacketType>(eval,func) );
}
};
-template<typename Func, typename Derived, int Start>
-struct redux_vec_unroller<Func, Derived, Start, 1>
+template<typename Func, typename Evaluator, int Start>
+struct redux_vec_unroller<Func, Evaluator, Start, 1>
{
- enum {
- index = Start * redux_traits<Func, Derived>::PacketSize,
- outer = index / int(Derived::InnerSizeAtCompileTime),
- inner = index % int(Derived::InnerSizeAtCompileTime),
- alignment = Derived::Alignment
- };
-
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
-
- static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func&)
{
- return mat.template packetByOuterInner<alignment,PacketScalar>(outer, inner);
+ enum {
+ PacketSize = unpacket_traits<PacketType>::size,
+ index = Start * PacketSize,
+ outer = index / int(Evaluator::InnerSizeAtCompileTime),
+ inner = index % int(Evaluator::InnerSizeAtCompileTime),
+ alignment = Evaluator::Alignment
+ };
+ return eval.template packetByOuterInner<alignment,PacketType>(outer, inner);
}
};
@@ -176,53 +182,65 @@ struct redux_vec_unroller<Func, Derived, Start, 1>
* Part 3 : implementation of all cases
***************************************************************************/
-template<typename Func, typename Derived,
- int Traversal = redux_traits<Func, Derived>::Traversal,
- int Unrolling = redux_traits<Func, Derived>::Unrolling
+template<typename Func, typename Evaluator,
+ int Traversal = redux_traits<Func, Evaluator>::Traversal,
+ int Unrolling = redux_traits<Func, Evaluator>::Unrolling
>
struct redux_impl;
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
+template<typename Func, typename Evaluator>
+struct redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>
{
- typedef typename Derived::Scalar Scalar;
- EIGEN_DEVICE_FUNC
- static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+ typedef typename Evaluator::Scalar Scalar;
+
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr)
{
- eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix");
Scalar res;
- res = mat.coeffByOuterInner(0, 0);
- for(Index i = 1; i < mat.innerSize(); ++i)
- res = func(res, mat.coeffByOuterInner(0, i));
- for(Index i = 1; i < mat.outerSize(); ++i)
- for(Index j = 0; j < mat.innerSize(); ++j)
- res = func(res, mat.coeffByOuterInner(i, j));
+ res = eval.coeffByOuterInner(0, 0);
+ for(Index i = 1; i < xpr.innerSize(); ++i)
+ res = func(res, eval.coeffByOuterInner(0, i));
+ for(Index i = 1; i < xpr.outerSize(); ++i)
+ for(Index j = 0; j < xpr.innerSize(); ++j)
+ res = func(res, eval.coeffByOuterInner(i, j));
return res;
}
};
-template<typename Func, typename Derived>
-struct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling>
- : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime>
-{};
+template<typename Func, typename Evaluator>
+struct redux_impl<Func,Evaluator, DefaultTraversal, CompleteUnrolling>
+ : redux_novec_unroller<Func,Evaluator, 0, Evaluator::SizeAtCompileTime>
+{
+ typedef redux_novec_unroller<Func,Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
+ typedef typename Evaluator::Scalar Scalar;
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ Scalar run(const Evaluator &eval, const Func& func, const XprType& /*xpr*/)
+ {
+ return Base::run(eval,func);
+ }
+};
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
+template<typename Func, typename Evaluator>
+struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, NoUnrolling>
{
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
- static Scalar run(const Derived &mat, const Func& func)
+ template<typename XprType>
+ static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr)
{
- const Index size = mat.size();
+ const Index size = xpr.size();
- const Index packetSize = redux_traits<Func, Derived>::PacketSize;
+ const Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
enum {
- alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
- alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Derived::Alignment)
+ alignment0 = (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
+ alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Evaluator::Alignment)
};
- const Index alignedStart = internal::first_default_aligned(mat.nestedExpression());
+ const Index alignedStart = internal::first_default_aligned(xpr);
const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize);
const Index alignedEnd2 = alignedStart + alignedSize2;
@@ -230,34 +248,34 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
Scalar res;
if(alignedSize)
{
- PacketScalar packet_res0 = mat.template packet<alignment,PacketScalar>(alignedStart);
+ PacketScalar packet_res0 = eval.template packet<alignment,PacketScalar>(alignedStart);
if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop
{
- PacketScalar packet_res1 = mat.template packet<alignment,PacketScalar>(alignedStart+packetSize);
+ PacketScalar packet_res1 = eval.template packet<alignment,PacketScalar>(alignedStart+packetSize);
for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize)
{
- packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(index));
- packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment,PacketScalar>(index+packetSize));
+ packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment,PacketScalar>(index));
+ packet_res1 = func.packetOp(packet_res1, eval.template packet<alignment,PacketScalar>(index+packetSize));
}
packet_res0 = func.packetOp(packet_res0,packet_res1);
if(alignedEnd>alignedEnd2)
- packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(alignedEnd2));
+ packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment,PacketScalar>(alignedEnd2));
}
res = func.predux(packet_res0);
for(Index index = 0; index < alignedStart; ++index)
- res = func(res,mat.coeff(index));
+ res = func(res,eval.coeff(index));
for(Index index = alignedEnd; index < size; ++index)
- res = func(res,mat.coeff(index));
+ res = func(res,eval.coeff(index));
}
else // too small to vectorize anything.
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
{
- res = mat.coeff(0);
+ res = eval.coeff(0);
for(Index index = 1; index < size; ++index)
- res = func(res,mat.coeff(index));
+ res = func(res,eval.coeff(index));
}
return res;
@@ -265,130 +283,108 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
};
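The LinearVectorizedTraversal path above peels the main loop over two independent packet accumulators (packet_res0/packet_res1) before a final predux. The point is to break the loop-carried dependency chain so consecutive packetOp calls can overlap in the CPU pipeline. A scalar-level sketch of the same trick:

    #include <cstddef>
    #include <cstdio>

    // Two independent partial sums; assumes size is even and >= 2 for brevity.
    double sum2(const double* x, std::size_t size) {
        double acc0 = x[0];
        double acc1 = x[1];
        for (std::size_t i = 2; i + 1 < size; i += 2) {
            acc0 += x[i];     // chain 0
            acc1 += x[i + 1]; // chain 1, independent of chain 0
        }
        return acc0 + acc1;   // combine, like func.packetOp(packet_res0, packet_res1)
    }

    int main() {
        const double x[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        std::printf("%g\n", sum2(x, 8)); // 36
    }

Note this reassociates the reduction, which is fine for the associative functors redux() requires but would change results for a strictly ordered floating-point sum.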
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
-template<typename Func, typename Derived, int Unrolling>
-struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
+template<typename Func, typename Evaluator, int Unrolling>
+struct redux_impl<Func, Evaluator, SliceVectorizedTraversal, Unrolling>
{
- typedef typename Derived::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketType;
+ typedef typename Evaluator::Scalar Scalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
- EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr)
{
- eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
- const Index innerSize = mat.innerSize();
- const Index outerSize = mat.outerSize();
+ eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix");
+ const Index innerSize = xpr.innerSize();
+ const Index outerSize = xpr.outerSize();
enum {
- packetSize = redux_traits<Func, Derived>::PacketSize
+ packetSize = redux_traits<Func, Evaluator>::PacketSize
};
const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
Scalar res;
if(packetedInnerSize)
{
- PacketType packet_res = mat.template packet<Unaligned,PacketType>(0,0);
+ PacketType packet_res = eval.template packet<Unaligned,PacketType>(0,0);
for(Index j=0; j<outerSize; ++j)
for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
- packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned,PacketType>(j,i));
+ packet_res = func.packetOp(packet_res, eval.template packetByOuterInner<Unaligned,PacketType>(j,i));
res = func.predux(packet_res);
for(Index j=0; j<outerSize; ++j)
for(Index i=packetedInnerSize; i<innerSize; ++i)
- res = func(res, mat.coeffByOuterInner(j,i));
+ res = func(res, eval.coeffByOuterInner(j,i));
}
else // too small to vectorize anything.
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
{
- res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func);
+ res = redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>::run(eval, func, xpr);
}
return res;
}
};
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
+template<typename Func, typename Evaluator>
+struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, CompleteUnrolling>
{
- typedef typename Derived::Scalar Scalar;
+ typedef typename Evaluator::Scalar Scalar;
- typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
enum {
- PacketSize = redux_traits<Func, Derived>::PacketSize,
- Size = Derived::SizeAtCompileTime,
- VectorizedSize = (Size / PacketSize) * PacketSize
+ PacketSize = redux_traits<Func, Evaluator>::PacketSize,
+ Size = Evaluator::SizeAtCompileTime,
+ VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize)
};
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+
+ template<typename XprType>
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE
+ Scalar run(const Evaluator &eval, const Func& func, const XprType &xpr)
{
- eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ EIGEN_ONLY_USED_FOR_DEBUG(xpr)
+ eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix");
if (VectorizedSize > 0) {
- Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
+ Scalar res = func.predux(redux_vec_unroller<Func, Evaluator, 0, Size / PacketSize>::template run<PacketType>(eval,func));
if (VectorizedSize != Size)
- res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
+ res = func(res,redux_novec_unroller<Func, Evaluator, VectorizedSize, Size-VectorizedSize>::run(eval,func));
return res;
}
else {
- return redux_novec_unroller<Func, Derived, 0, Size>::run(mat,func);
+ return redux_novec_unroller<Func, Evaluator, 0, Size>::run(eval,func);
}
}
};
// evaluator adaptor
template<typename _XprType>
-class redux_evaluator
+class redux_evaluator : public internal::evaluator<_XprType>
{
+ typedef internal::evaluator<_XprType> Base;
public:
typedef _XprType XprType;
- EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit redux_evaluator(const XprType &xpr) : Base(xpr) {}
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
- typedef typename XprType::PacketReturnType PacketReturnType;
enum {
MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
// TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator
- Flags = evaluator<XprType>::Flags & ~DirectAccessBit,
+ Flags = Base::Flags & ~DirectAccessBit,
IsRowMajor = XprType::IsRowMajor,
SizeAtCompileTime = XprType::SizeAtCompileTime,
- InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime,
- CoeffReadCost = evaluator<XprType>::CoeffReadCost,
- Alignment = evaluator<XprType>::Alignment
+ InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
};
- EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
- EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
- EIGEN_DEVICE_FUNC Index innerSize() const { return m_xpr.innerSize(); }
- EIGEN_DEVICE_FUNC Index outerSize() const { return m_xpr.outerSize(); }
-
- EIGEN_DEVICE_FUNC
- CoeffReturnType coeff(Index row, Index col) const
- { return m_evaluator.coeff(row, col); }
-
- EIGEN_DEVICE_FUNC
- CoeffReturnType coeff(Index index) const
- { return m_evaluator.coeff(index); }
-
- template<int LoadMode, typename PacketType>
- PacketType packet(Index row, Index col) const
- { return m_evaluator.template packet<LoadMode,PacketType>(row, col); }
-
- template<int LoadMode, typename PacketType>
- PacketType packet(Index index) const
- { return m_evaluator.template packet<LoadMode,PacketType>(index); }
-
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
- { return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
+ { return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
template<int LoadMode, typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketType packetByOuterInner(Index outer, Index inner) const
- { return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
-
- const XprType & nestedExpression() const { return m_xpr; }
+ { return Base::template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
-protected:
- internal::evaluator<XprType> m_evaluator;
- const XprType &m_xpr;
};
} // end namespace internal
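redux_evaluator used to hold an evaluator<XprType> member plus a reference to the expression and hand-forward coeff/packet/rows/cols; it now simply derives from internal::evaluator<XprType>, keeps only the outer/inner index mapping, and the expression itself is passed to run() as a separate argument (see the redux() change below). A generic sketch of that refactor pattern, with purely illustrative names:

    struct Inner {
        double coeff(int i) const { return static_cast<double>(i); }
    };

    // Before: wrap a member and forward every call.
    struct WrapByMember {
        explicit WrapByMember(Inner e) : impl(e) {}
        double coeff(int i) const { return impl.coeff(i); } // pure boilerplate
        Inner impl;
    };

    // After: inherit the interface and add only what is new.
    struct WrapByInheritance : Inner {
        double coeffByOuterInner(int outer, int inner) const {
            return coeff(outer * 4 + inner); // only the extra mapping survives
        }
    };

Besides shedding boilerplate, this drops the stored XprType reference, which is what allows the rows()/cols()/size() queries to move off the evaluator and onto the xpr argument.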
@@ -403,39 +399,53 @@ protected:
* The template parameter \a BinaryOp is the type of the functor \a func which must be
* an associative operator. Both current C++98 and C++11 functor styles are handled.
*
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
+ *
* \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
*/
template<typename Derived>
template<typename Func>
-EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::redux(const Func& func) const
{
eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
ThisEvaluator thisEval(derived());
-
- return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func);
+
+ // The initial expression is passed to the reducer as an additional argument instead of
+ // passing it as a member of redux_evaluator to help
+ return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func, derived());
}
/** \returns the minimum of all coefficients of \c *this.
- * \warning the result is undefined if \c *this contains NaN.
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
+ * NaNPropagation == PropagateFast : undefined
+ * NaNPropagation == PropagateNaN : result is NaN
+ * NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
*/
template<typename Derived>
+template<int NaNPropagation>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff() const
{
- return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>());
+ return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar, NaNPropagation>());
}
-/** \returns the maximum of all coefficients of \c *this.
- * \warning the result is undefined if \c *this contains NaN.
+/** \returns the maximum of all coefficients of \c *this.
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
+ * NaNPropagation == PropagateFast : undefined
+ * NaNPropagation == PropagateNaN : result is NaN
+ * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
*/
template<typename Derived>
+template<int NaNPropagation>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff() const
{
- return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>());
+ return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar, NaNPropagation>());
}
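A usage sketch of the new NaNPropagation template parameter (assuming the PropagateFast/PropagateNaN/PropagateNumbers constants introduced alongside this change are in scope; the comment blocks above spell out the three behaviors):

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    int main() {
        Eigen::Vector3d v(1.0, std::nan(""), 3.0);

        std::cout << v.minCoeff<Eigen::PropagateNumbers>() << "\n"; // 1 (NaN skipped)
        std::cout << v.maxCoeff<Eigen::PropagateNaN>() << "\n";     // nan
    }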
/** \returns the sum of all coefficients of \c *this
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Ref.h b/examples/ThirdPartyLibs/Eigen/src/Core/Ref.h
index abb1e5121..c2a37eadb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Ref.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Ref.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_REF_H
#define EIGEN_REF_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -28,12 +28,13 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
template<typename Derived> struct match {
enum {
+ IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime,
HasDirectAccess = internal::has_direct_access<Derived>::ret,
- StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+ StorageOrderMatch = IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
|| int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
|| (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
- OuterStrideMatch = Derived::IsVectorAtCompileTime
+ OuterStrideMatch = IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
// NOTE, this indirection of evaluator<Derived>::Alignment is needed
// to workaround a very strange bug in MSVC related to the instantiation
@@ -47,7 +48,7 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
-
+
};
template<typename Derived>
@@ -66,12 +67,12 @@ public:
typedef MapBase<Derived> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
- EIGEN_DEVICE_FUNC inline Index innerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const
{
return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
}
- EIGEN_DEVICE_FUNC inline Index outerStride() const
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const
{
return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
: IsVectorAtCompileTime ? this->size()
@@ -85,34 +86,122 @@ public:
m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,
StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)
{}
-
+
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)
protected:
typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;
+ // Resolves inner stride if default 0.
+ static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveInnerStride(Index inner) {
+ return inner == 0 ? 1 : inner;
+ }
+
+ // Resolves outer stride if default 0.
+ static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols, bool isVectorAtCompileTime, bool isRowMajor) {
+ return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer;
+ }
+
+ // Returns true if construction is valid, false if there is a stride mismatch,
+ // and fails if there is a size mismatch.
template<typename Expression>
- EIGEN_DEVICE_FUNC void construct(Expression& expr)
+ EIGEN_DEVICE_FUNC bool construct(Expression& expr)
{
+ // Check matrix sizes. If this is a compile-time vector, we do allow
+ // implicitly transposing.
+ EIGEN_STATIC_ASSERT(
+ EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression)
+ // If it is a vector, the transpose sizes might match.
+ || ( PlainObjectType::IsVectorAtCompileTime
+ && ((int(PlainObjectType::RowsAtCompileTime)==Eigen::Dynamic
+ || int(Expression::ColsAtCompileTime)==Eigen::Dynamic
+ || int(PlainObjectType::RowsAtCompileTime)==int(Expression::ColsAtCompileTime))
+ && (int(PlainObjectType::ColsAtCompileTime)==Eigen::Dynamic
+ || int(Expression::RowsAtCompileTime)==Eigen::Dynamic
+ || int(PlainObjectType::ColsAtCompileTime)==int(Expression::RowsAtCompileTime)))),
+ YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES
+ )
+
+ // Determine runtime rows and columns.
+ Index rows = expr.rows();
+ Index cols = expr.cols();
if(PlainObjectType::RowsAtCompileTime==1)
{
eigen_assert(expr.rows()==1 || expr.cols()==1);
- ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());
+ rows = 1;
+ cols = expr.size();
}
else if(PlainObjectType::ColsAtCompileTime==1)
{
eigen_assert(expr.rows()==1 || expr.cols()==1);
- ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);
+ rows = expr.size();
+ cols = 1;
+ }
+ // Verify that the sizes are valid.
+ eigen_assert(
+ (PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows));
+ eigen_assert(
+ (PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols));
+
+
+ // If this is a vector, we might be transposing, which means that stride should swap.
+ const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows());
+ // If the storage format differs, we also need to swap the stride.
+ const bool row_major = ((PlainObjectType::Flags)&RowMajorBit) != 0;
+ const bool expr_row_major = (Expression::Flags&RowMajorBit) != 0;
+ const bool storage_differs = (row_major != expr_row_major);
+
+ const bool swap_stride = (transpose != storage_differs);
+
+ // Determine expr's actual strides, resolving any defaults if zero.
+ const Index expr_inner_actual = resolveInnerStride(expr.innerStride());
+ const Index expr_outer_actual = resolveOuterStride(expr_inner_actual,
+ expr.outerStride(),
+ expr.rows(),
+ expr.cols(),
+ Expression::IsVectorAtCompileTime != 0,
+ expr_row_major);
+
+ // If this is a column-major row vector or row-major column vector, the inner-stride
+ // is arbitrary, so set it to either the compile-time inner stride or 1.
+ const bool row_vector = (rows == 1);
+ const bool col_vector = (cols == 1);
+ const Index inner_stride =
+ ( (!row_major && row_vector) || (row_major && col_vector) ) ?
+ ( StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1)
+ : swap_stride ? expr_outer_actual : expr_inner_actual;
+
+ // If this is a column-major column vector or row-major row vector, the outer-stride
+ // is arbitrary, so set it to either the compile-time outer stride or vector size.
+ const Index outer_stride =
+ ( (!row_major && col_vector) || (row_major && row_vector) ) ?
+ ( StrideType::OuterStrideAtCompileTime > 0 ? Index(StrideType::OuterStrideAtCompileTime) : rows * cols * inner_stride)
+ : swap_stride ? expr_inner_actual : expr_outer_actual;
+
+ // Check if given inner/outer strides are compatible with compile-time strides.
+ const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic)
+ || (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride);
+ if (!inner_valid) {
+ return false;
}
- else
- ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
-
- if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))
- ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);
- else
- ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
- StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
+
+ const bool outer_valid = (StrideType::OuterStrideAtCompileTime == Dynamic)
+ || (resolveOuterStride(
+ inner_stride,
+ Index(StrideType::OuterStrideAtCompileTime),
+ rows, cols, PlainObjectType::IsVectorAtCompileTime != 0,
+ row_major)
+ == outer_stride);
+ if (!outer_valid) {
+ return false;
+ }
+
+ ::new (static_cast<Base*>(this)) Base(expr.data(), rows, cols);
+ ::new (&m_stride) StrideBase(
+ (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride,
+ (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride );
+ return true;
}
StrideBase m_stride;
@@ -209,7 +298,10 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
- Base::construct(expr.derived());
+ // Construction must pass since we will not create temporary storage in the non-const case.
+ const bool success = Base::construct(expr.derived());
+ EIGEN_UNUSED_VARIABLE(success)
+ eigen_assert(success);
}
template<typename Derived>
EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,
@@ -223,7 +315,10 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
- Base::construct(expr.const_cast_derived());
+ // Construction must pass since we will not create temporary storage in the non-const case.
+ const bool success = Base::construct(expr.const_cast_derived());
+ EIGEN_UNUSED_VARIABLE(success)
+ eigen_assert(success);
}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
@@ -264,7 +359,10 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
template<typename Expression>
EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type)
{
- Base::construct(expr);
+ // Check if we can use the underlying expr's storage directly, otherwise call the copy version.
+ if (!Base::construct(expr)) {
+ construct(expr, internal::false_type());
+ }
}
template<typename Expression>
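For context, a minimal sketch of what the fallible construct() above means at the call site (illustrative only, based on the documented Eigen Ref semantics):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXf A = MatrixXf::Random(4, 4);
      // A column of a column-major matrix is contiguous, so construct()
      // succeeds and the Ref maps A's storage directly.
      Ref<const VectorXf> c(A.col(1));
      // A transposed row has inner stride 4, not the required 1, so
      // construct() returns false and the const specialization above
      // falls back to evaluating into temporary storage.
      Ref<const VectorXf> r(A.row(1).transpose());
      return 0;
    }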
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Replicate.h b/examples/ThirdPartyLibs/Eigen/src/Core/Replicate.h
index 0b2d6d743..ab5be7e64 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Replicate.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Replicate.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_REPLICATE_H
#define EIGEN_REPLICATE_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename MatrixType,int RowFactor,int ColFactor>
@@ -35,7 +35,7 @@ struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
: MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
: (MatrixType::Flags & RowMajorBit) ? 1 : 0,
-
+
// FIXME enable DirectAccess with negative strides?
Flags = IsRowMajor ? RowMajorBit : 0
};
@@ -88,15 +88,15 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
}
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
EIGEN_DEVICE_FUNC
const _MatrixTypeNested& nestedExpression() const
- {
- return m_matrix;
+ {
+ return m_matrix;
}
protected:
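A short illustration of the Replicate expression whose rows()/cols() become constexpr above (a sketch assuming the standard DenseBase::replicate API):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      RowVector3i v(1, 2, 3);
      // replicate(r, c) tiles the expression r times vertically and c times
      // horizontally; rows()/cols() simply scale the nested sizes by the
      // replication factors, which is why they can be constexpr.
      MatrixXi tiled = v.replicate(2, 2);  // 2x6
      return 0;
    }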
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Reshaped.h b/examples/ThirdPartyLibs/Eigen/src/Core/Reshaped.h
new file mode 100644
index 000000000..52de73b6f
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Reshaped.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2014 yoco <peter.xiau@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RESHAPED_H
+#define EIGEN_RESHAPED_H
+
+namespace Eigen {
+
+/** \class Reshaped
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a fixed-size or dynamic-size reshape
+ *
+ * \tparam XprType the type of the expression in which we are taking a reshape
+ * \tparam Rows the number of rows of the reshape we are taking at compile time (optional)
+ * \tparam Cols the number of columns of the reshape we are taking at compile time (optional)
+ * \tparam Order can be ColMajor or RowMajor, default is ColMajor.
+ *
+ * This class represents an expression of either a fixed-size or dynamic-size reshape.
+ * It is the return type of DenseBase::reshaped(NRowsType,NColsType) and
+ * most of the time this is the only way it is used.
+ *
+ * However, in C++98, if you want to directly manipulate reshaped expressions,
+ * for instance if you want to write a function returning such an expression, you
+ * will need to use this class. In C++11, it is advised to use the \em auto
+ * keyword for such use cases.
+ *
+ * Here is an example illustrating the dynamic case:
+ * \include class_Reshaped.cpp
+ * Output: \verbinclude class_Reshaped.out
+ *
+ * Here is an example illustrating the fixed-size case:
+ * \include class_FixedReshaped.cpp
+ * Output: \verbinclude class_FixedReshaped.out
+ *
+ * \sa DenseBase::reshaped(NRowsType,NColsType)
+ */
+
+namespace internal {
+
+template<typename XprType, int Rows, int Cols, int Order>
+struct traits<Reshaped<XprType, Rows, Cols, Order> > : traits<XprType>
+{
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename traits<XprType>::StorageKind StorageKind;
+ typedef typename traits<XprType>::XprKind XprKind;
+ enum{
+ MatrixRows = traits<XprType>::RowsAtCompileTime,
+ MatrixCols = traits<XprType>::ColsAtCompileTime,
+ RowsAtCompileTime = Rows,
+ ColsAtCompileTime = Cols,
+ MaxRowsAtCompileTime = Rows,
+ MaxColsAtCompileTime = Cols,
+ XpxStorageOrder = ((int(traits<XprType>::Flags) & RowMajorBit) == RowMajorBit) ? RowMajor : ColMajor,
+ ReshapedStorageOrder = (RowsAtCompileTime == 1 && ColsAtCompileTime != 1) ? RowMajor
+ : (ColsAtCompileTime == 1 && RowsAtCompileTime != 1) ? ColMajor
+ : XpxStorageOrder,
+ HasSameStorageOrderAsXprType = (ReshapedStorageOrder == XpxStorageOrder),
+ InnerSize = (ReshapedStorageOrder==int(RowMajor)) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+ InnerStrideAtCompileTime = HasSameStorageOrderAsXprType
+ ? int(inner_stride_at_compile_time<XprType>::ret)
+ : Dynamic,
+ OuterStrideAtCompileTime = Dynamic,
+
+ HasDirectAccess = internal::has_direct_access<XprType>::ret
+ && (Order==int(XpxStorageOrder))
+ && ((evaluator<XprType>::Flags&LinearAccessBit)==LinearAccessBit),
+
+ MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
+ && (InnerStrideAtCompileTime == 1)
+ ? PacketAccessBit : 0,
+ //MaskAlignedBit = ((OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+ FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
+ FlagsRowMajorBit = (ReshapedStorageOrder==int(RowMajor)) ? RowMajorBit : 0,
+ FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0,
+ Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) | MaskPacketAccessBit),
+
+ Flags = (Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit | FlagsDirectAccessBit)
+ };
+};
+
+template<typename XprType, int Rows, int Cols, int Order, bool HasDirectAccess> class ReshapedImpl_dense;
+
+} // end namespace internal
+
+template<typename XprType, int Rows, int Cols, int Order, typename StorageKind> class ReshapedImpl;
+
+template<typename XprType, int Rows, int Cols, int Order> class Reshaped
+ : public ReshapedImpl<XprType, Rows, Cols, Order, typename internal::traits<XprType>::StorageKind>
+{
+ typedef ReshapedImpl<XprType, Rows, Cols, Order, typename internal::traits<XprType>::StorageKind> Impl;
+ public:
+ //typedef typename Impl::Base Base;
+ typedef Impl Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(Reshaped)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reshaped)
+
+ /** Fixed-size constructor
+ */
+ EIGEN_DEVICE_FUNC
+ inline Reshaped(XprType& xpr)
+ : Impl(xpr)
+ {
+ EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
+ eigen_assert(Rows * Cols == xpr.rows() * xpr.cols());
+ }
+
+ /** Dynamic-size constructor
+ */
+ EIGEN_DEVICE_FUNC
+ inline Reshaped(XprType& xpr,
+ Index reshapeRows, Index reshapeCols)
+ : Impl(xpr, reshapeRows, reshapeCols)
+ {
+ eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==reshapeRows)
+ && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==reshapeCols));
+ eigen_assert(reshapeRows * reshapeCols == xpr.rows() * xpr.cols());
+ }
+};
+
+// The generic default implementation for dense reshape simply forwards to the internal::ReshapedImpl_dense
+// that must be specialized for direct and non-direct access...
+template<typename XprType, int Rows, int Cols, int Order>
+class ReshapedImpl<XprType, Rows, Cols, Order, Dense>
+ : public internal::ReshapedImpl_dense<XprType, Rows, Cols, Order,internal::traits<Reshaped<XprType,Rows,Cols,Order> >::HasDirectAccess>
+{
+ typedef internal::ReshapedImpl_dense<XprType, Rows, Cols, Order,internal::traits<Reshaped<XprType,Rows,Cols,Order> >::HasDirectAccess> Impl;
+ public:
+ typedef Impl Base;
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl)
+ EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr) : Impl(xpr) {}
+ EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr, Index reshapeRows, Index reshapeCols)
+ : Impl(xpr, reshapeRows, reshapeCols) {}
+};
+
+namespace internal {
+
+/** \internal Internal implementation of dense Reshaped in the general case. */
+template<typename XprType, int Rows, int Cols, int Order>
+class ReshapedImpl_dense<XprType,Rows,Cols,Order,false>
+ : public internal::dense_xpr_base<Reshaped<XprType, Rows, Cols, Order> >::type
+{
+ typedef Reshaped<XprType, Rows, Cols, Order> ReshapedType;
+ public:
+
+ typedef typename internal::dense_xpr_base<ReshapedType>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense)
+
+ typedef typename internal::ref_selector<XprType>::non_const_type MatrixTypeNested;
+ typedef typename internal::remove_all<XprType>::type NestedExpression;
+
+ class InnerIterator;
+
+ /** Fixed-size constructor
+ */
+ EIGEN_DEVICE_FUNC
+ inline ReshapedImpl_dense(XprType& xpr)
+ : m_xpr(xpr), m_rows(Rows), m_cols(Cols)
+ {}
+
+ /** Dynamic-size constructor
+ */
+ EIGEN_DEVICE_FUNC
+ inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols)
+ : m_xpr(xpr), m_rows(nRows), m_cols(nCols)
+ {}
+
+ EIGEN_DEVICE_FUNC Index rows() const { return m_rows; }
+ EIGEN_DEVICE_FUNC Index cols() const { return m_cols; }
+
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** \sa MapBase::data() */
+ EIGEN_DEVICE_FUNC inline const Scalar* data() const;
+ EIGEN_DEVICE_FUNC inline Index innerStride() const;
+ EIGEN_DEVICE_FUNC inline Index outerStride() const;
+ #endif
+
+ /** \returns the nested expression */
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<XprType>::type&
+ nestedExpression() const { return m_xpr; }
+
+ /** \returns the nested expression */
+ EIGEN_DEVICE_FUNC
+ typename internal::remove_reference<XprType>::type&
+ nestedExpression() { return m_xpr; }
+
+ protected:
+
+ MatrixTypeNested m_xpr;
+ const internal::variable_if_dynamic<Index, Rows> m_rows;
+ const internal::variable_if_dynamic<Index, Cols> m_cols;
+};
+
+
+/** \internal Internal implementation of dense Reshaped in the direct access case. */
+template<typename XprType, int Rows, int Cols, int Order>
+class ReshapedImpl_dense<XprType, Rows, Cols, Order, true>
+ : public MapBase<Reshaped<XprType, Rows, Cols, Order> >
+{
+ typedef Reshaped<XprType, Rows, Cols, Order> ReshapedType;
+ typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
+ public:
+
+ typedef MapBase<ReshapedType> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense)
+
+ /** Fixed-size constructor
+ */
+ EIGEN_DEVICE_FUNC
+ inline ReshapedImpl_dense(XprType& xpr)
+ : Base(xpr.data()), m_xpr(xpr)
+ {}
+
+ /** Dynamic-size constructor
+ */
+ EIGEN_DEVICE_FUNC
+ inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols)
+ : Base(xpr.data(), nRows, nCols),
+ m_xpr(xpr)
+ {}
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const
+ {
+ return m_xpr;
+ }
+
+ EIGEN_DEVICE_FUNC
+ XprType& nestedExpression() { return m_xpr; }
+
+ /** \sa MapBase::innerStride() */
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const
+ {
+ return m_xpr.innerStride();
+ }
+
+ /** \sa MapBase::outerStride() */
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const
+ {
+ return ((Flags&RowMajorBit)==RowMajorBit) ? this->cols() : this->rows();
+ }
+
+ protected:
+
+ XprTypeNested m_xpr;
+};
+
+// Evaluators
+template<typename ArgType, int Rows, int Cols, int Order, bool HasDirectAccess> struct reshaped_evaluator;
+
+template<typename ArgType, int Rows, int Cols, int Order>
+struct evaluator<Reshaped<ArgType, Rows, Cols, Order> >
+ : reshaped_evaluator<ArgType, Rows, Cols, Order, traits<Reshaped<ArgType,Rows,Cols,Order> >::HasDirectAccess>
+{
+ typedef Reshaped<ArgType, Rows, Cols, Order> XprType;
+ typedef typename XprType::Scalar Scalar;
+ // TODO: should check for smaller packet types
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ HasDirectAccess = traits<XprType>::HasDirectAccess,
+
+// RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
+// ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
+// MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
+// MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
+//
+// InnerStrideAtCompileTime = traits<XprType>::HasSameStorageOrderAsXprType
+// ? int(inner_stride_at_compile_time<ArgType>::ret)
+// : Dynamic,
+// OuterStrideAtCompileTime = Dynamic,
+
+ FlagsLinearAccessBit = (traits<XprType>::RowsAtCompileTime == 1 || traits<XprType>::ColsAtCompileTime == 1 || HasDirectAccess) ? LinearAccessBit : 0,
+ FlagsRowMajorBit = (traits<XprType>::ReshapedStorageOrder==int(RowMajor)) ? RowMajorBit : 0,
+ FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0,
+ Flags0 = evaluator<ArgType>::Flags & (HereditaryBits & ~RowMajorBit),
+ Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit | FlagsDirectAccessBit,
+
+ PacketAlignment = unpacket_traits<PacketScalar>::alignment,
+ Alignment = evaluator<ArgType>::Alignment
+ };
+ typedef reshaped_evaluator<ArgType, Rows, Cols, Order, HasDirectAccess> reshaped_evaluator_type;
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : reshaped_evaluator_type(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+};
+
+template<typename ArgType, int Rows, int Cols, int Order>
+struct reshaped_evaluator<ArgType, Rows, Cols, Order, /* HasDirectAccess */ false>
+ : evaluator_base<Reshaped<ArgType, Rows, Cols, Order> >
+{
+ typedef Reshaped<ArgType, Rows, Cols, Order> XprType;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost /* TODO + cost of index computations */,
+
+ Flags = (evaluator<ArgType>::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)),
+
+ Alignment = 0
+ };
+
+ EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ typedef std::pair<Index, Index> RowCol;
+
+ inline RowCol index_remap(Index rowId, Index colId) const
+ {
+ if(Order==ColMajor)
+ {
+ const Index nth_elem_idx = colId * m_xpr.rows() + rowId;
+ return RowCol(nth_elem_idx % m_xpr.nestedExpression().rows(),
+ nth_elem_idx / m_xpr.nestedExpression().rows());
+ }
+ else
+ {
+ const Index nth_elem_idx = colId + rowId * m_xpr.cols();
+ return RowCol(nth_elem_idx / m_xpr.nestedExpression().cols(),
+ nth_elem_idx % m_xpr.nestedExpression().cols());
+ }
+ }
+
+ EIGEN_DEVICE_FUNC
+ inline Scalar& coeffRef(Index rowId, Index colId)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
+ const RowCol row_col = index_remap(rowId, colId);
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
+ }
+
+ EIGEN_DEVICE_FUNC
+ inline const Scalar& coeffRef(Index rowId, Index colId) const
+ {
+ const RowCol row_col = index_remap(rowId, colId);
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const
+ {
+ const RowCol row_col = index_remap(rowId, colId);
+ return m_argImpl.coeff(row_col.first, row_col.second);
+ }
+
+ EIGEN_DEVICE_FUNC
+ inline Scalar& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
+ const RowCol row_col = index_remap(Rows == 1 ? 0 : index,
+ Rows == 1 ? index : 0);
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
+
+ }
+
+ EIGEN_DEVICE_FUNC
+ inline const Scalar& coeffRef(Index index) const
+ {
+ const RowCol row_col = index_remap(Rows == 1 ? 0 : index,
+ Rows == 1 ? index : 0);
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
+ }
+
+ EIGEN_DEVICE_FUNC
+ inline const CoeffReturnType coeff(Index index) const
+ {
+ const RowCol row_col = index_remap(Rows == 1 ? 0 : index,
+ Rows == 1 ? index : 0);
+ return m_argImpl.coeff(row_col.first, row_col.second);
+ }
+#if 0
+ EIGEN_DEVICE_FUNC
+ template<int LoadMode>
+ inline PacketScalar packet(Index rowId, Index colId) const
+ {
+ const RowCol row_col = index_remap(rowId, colId);
+ return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second);
+
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC
+ inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
+ {
+ const RowCol row_col = index_remap(rowId, colId);
+ m_argImpl.const_cast_derived().template writePacket<Unaligned>
+ (row_col.first, row_col.second, val);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC
+ inline PacketScalar packet(Index index) const
+ {
+ const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0);
+ return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC
+ inline void writePacket(Index index, const PacketScalar& val)
+ {
+ const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0);
+    m_argImpl.const_cast_derived().template writePacket<Unaligned>(row_col.first, row_col.second, val);
+ }
+#endif
+protected:
+
+ evaluator<ArgType> m_argImpl;
+ const XprType& m_xpr;
+
+};
+
+template<typename ArgType, int Rows, int Cols, int Order>
+struct reshaped_evaluator<ArgType, Rows, Cols, Order, /* HasDirectAccess */ true>
+: mapbase_evaluator<Reshaped<ArgType, Rows, Cols, Order>,
+ typename Reshaped<ArgType, Rows, Cols, Order>::PlainObject>
+{
+ typedef Reshaped<ArgType, Rows, Cols, Order> XprType;
+ typedef typename XprType::Scalar Scalar;
+
+ EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr)
+ : mapbase_evaluator<XprType, typename XprType::PlainObject>(xpr)
+ {
+    // TODO: for the 3.4 release, this should be turned into an internal assertion, but let's keep it as is for the beta lifetime
+ eigen_assert(((internal::UIntPtr(xpr.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_RESHAPED_H
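A minimal usage sketch for the new Reshaped expression (illustrative only, assuming the Eigen 3.4 DenseBase::reshaped API described in the header above):

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main() {
      MatrixXi m(2, 4);
      m << 1, 3, 5, 7,
           2, 4, 6, 8;
      // reshaped(rows, cols) reinterprets the coefficients; the default
      // column-major order walks the source column by column, so this
      // prints a 4x2 matrix whose columns are 1,2,3,4 and 5,6,7,8.
      std::cout << m.reshaped(4, 2) << "\n";
      // With no arguments, reshaped() yields a linear (column-vector) view.
      std::cout << m.reshaped().transpose() << "\n";
      return 0;
    }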
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/ReturnByValue.h b/examples/ThirdPartyLibs/Eigen/src/Core/ReturnByValue.h
index 11dc86d07..4dad13ea1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/ReturnByValue.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/ReturnByValue.h
@@ -60,8 +60,10 @@ template<typename Derived> class ReturnByValue
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& dst) const
{ static_cast<const Derived*>(this)->evalTo(dst); }
- EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return static_cast<const Derived*>(this)->rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return static_cast<const Derived*>(this)->cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
@@ -90,7 +92,7 @@ namespace internal {
// Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that
// when a ReturnByValue expression is assigned, the evaluator is not constructed.
// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world
-
+
template<typename Derived>
struct evaluator<ReturnByValue<Derived> >
: public evaluator<typename internal::traits<Derived>::ReturnType>
@@ -98,7 +100,7 @@ struct evaluator<ReturnByValue<Derived> >
typedef ReturnByValue<Derived> XprType;
typedef typename internal::traits<Derived>::ReturnType PlainObject;
typedef evaluator<PlainObject> Base;
-
+
EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Reverse.h b/examples/ThirdPartyLibs/Eigen/src/Core/Reverse.h
index 8b6b3ab03..28cdd76ac 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Reverse.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Reverse.h
@@ -12,7 +12,7 @@
#ifndef EIGEN_REVERSE_H
#define EIGEN_REVERSE_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -44,7 +44,7 @@ template<typename PacketType> struct reverse_packet_cond<PacketType,false>
static inline PacketType run(const PacketType& x) { return x; }
};
-} // end namespace internal
+} // end namespace internal
/** \class Reverse
* \ingroup Core_Module
@@ -89,8 +89,10 @@ template<typename MatrixType, int Direction> class Reverse
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
EIGEN_DEVICE_FUNC inline Index innerStride() const
{
@@ -98,7 +100,7 @@ template<typename MatrixType, int Direction> class Reverse
}
EIGEN_DEVICE_FUNC const typename internal::remove_all<typename MatrixType::Nested>::type&
- nestedExpression() const
+ nestedExpression() const
{
return m_matrix;
}
@@ -161,7 +163,7 @@ EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::reverseInPlace()
}
namespace internal {
-
+
template<int Direction>
struct vectorwise_reverse_inplace_impl;
@@ -171,8 +173,10 @@ struct vectorwise_reverse_inplace_impl<Vertical>
template<typename ExpressionType>
static void run(ExpressionType &xpr)
{
+ const int HalfAtCompileTime = ExpressionType::RowsAtCompileTime==Dynamic?Dynamic:ExpressionType::RowsAtCompileTime/2;
Index half = xpr.rows()/2;
- xpr.topRows(half).swap(xpr.bottomRows(half).colwise().reverse());
+ xpr.topRows(fix<HalfAtCompileTime>(half))
+ .swap(xpr.bottomRows(fix<HalfAtCompileTime>(half)).colwise().reverse());
}
};
@@ -182,8 +186,10 @@ struct vectorwise_reverse_inplace_impl<Horizontal>
template<typename ExpressionType>
static void run(ExpressionType &xpr)
{
+ const int HalfAtCompileTime = ExpressionType::ColsAtCompileTime==Dynamic?Dynamic:ExpressionType::ColsAtCompileTime/2;
Index half = xpr.cols()/2;
- xpr.leftCols(half).swap(xpr.rightCols(half).rowwise().reverse());
+ xpr.leftCols(fix<HalfAtCompileTime>(half))
+ .swap(xpr.rightCols(fix<HalfAtCompileTime>(half)).rowwise().reverse());
}
};
@@ -203,7 +209,7 @@ struct vectorwise_reverse_inplace_impl<Horizontal>
template<typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
{
- internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());
+ internal::vectorwise_reverse_inplace_impl<Direction>::run(m_matrix);
}
} // end namespace Eigen
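A sketch of the user-facing behavior the fix<> change above preserves (assuming the Eigen 3.4 fix<N>(n) API with runtime fallback):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      Matrix4f m = Matrix4f::Random();
      // Reverse every column in place; the swap-based implementation above
      // exchanges the top and bottom halves block by block.
      m.colwise().reverseInPlace();
      // fix<N>(n) carries a compile-time value with a runtime fallback,
      // which is how the patch keeps fixed-size expressions fixed-size:
      auto top = m.topRows(fix<2>(m.rows() / 2));
      (void)top;
      return 0;
    }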
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Select.h b/examples/ThirdPartyLibs/Eigen/src/Core/Select.h
index 79eec1b5b..7c86bf87c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Select.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Select.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_SELECT_H
#define EIGEN_SELECT_H
-namespace Eigen {
+namespace Eigen {
/** \class Select
* \ingroup Core_Module
@@ -67,8 +67,10 @@ class Select : public internal::dense_xpr_base< Select<ConditionMatrixType, Then
eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
}
- inline EIGEN_DEVICE_FUNC Index rows() const { return m_condition.rows(); }
- inline EIGEN_DEVICE_FUNC Index cols() const { return m_condition.cols(); }
+ inline EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_condition.rows(); }
+ inline EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_condition.cols(); }
inline EIGEN_DEVICE_FUNC
const Scalar coeff(Index i, Index j) const
@@ -120,7 +122,7 @@ class Select : public internal::dense_xpr_base< Select<ConditionMatrixType, Then
*/
template<typename Derived>
template<typename ThenDerived,typename ElseDerived>
-inline const Select<Derived,ThenDerived,ElseDerived>
+inline EIGEN_DEVICE_FUNC const Select<Derived,ThenDerived,ElseDerived>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
const DenseBase<ElseDerived>& elseMatrix) const
{
@@ -134,7 +136,7 @@ DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
*/
template<typename Derived>
template<typename ThenDerived>
-inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+inline EIGEN_DEVICE_FUNC const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
const typename ThenDerived::Scalar& elseScalar) const
{
@@ -149,7 +151,7 @@ DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
*/
template<typename Derived>
template<typename ElseDerived>
-inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+inline EIGEN_DEVICE_FUNC const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,
const DenseBase<ElseDerived>& elseMatrix) const
{
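A minimal sketch of the select() calls that gain EIGEN_DEVICE_FUNC above (assuming the documented DenseBase::select API):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      ArrayXXf a = ArrayXXf::Random(3, 3);
      // select() chooses coefficient-wise between two operands based on a
      // boolean condition expression; this uses the matrix/scalar overload.
      ArrayXXf clamped = (a > 0.0f).select(a, 0.0f);
      (void)clamped;
      return 0;
    }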
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/SelfAdjointView.h b/examples/ThirdPartyLibs/Eigen/src/Core/SelfAdjointView.h
index 7e71fe3c0..8ce3b372a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/SelfAdjointView.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/SelfAdjointView.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_SELFADJOINTMATRIX_H
#define EIGEN_SELFADJOINTMATRIX_H
-namespace Eigen {
+namespace Eigen {
/** \class SelfAdjointView
* \ingroup Core_Module
@@ -58,29 +58,32 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
typedef MatrixTypeNestedCleaned NestedExpression;
/** \brief The type of coefficients in this matrix */
- typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
+ typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
+ typedef SelfAdjointView<typename internal::add_const<MatrixType>::type, UpLo> ConstSelfAdjointView;
enum {
Mode = internal::traits<SelfAdjointView>::Mode,
Flags = internal::traits<SelfAdjointView>::Flags,
- TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0)
+ TransposeMode = ((int(Mode) & int(Upper)) ? Lower : 0) | ((int(Mode) & int(Lower)) ? Upper : 0)
};
typedef typename MatrixType::PlainObject PlainObject;
EIGEN_DEVICE_FUNC
explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
- {}
+ {
+ EIGEN_STATIC_ASSERT(UpLo==Lower || UpLo==Upper,SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY);
+ }
- EIGEN_DEVICE_FUNC
- inline Index rows() const { return m_matrix.rows(); }
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return m_matrix.cols(); }
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const { return m_matrix.outerStride(); }
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const { return m_matrix.innerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return m_matrix.outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return m_matrix.innerStride(); }
/** \sa MatrixBase::coeff()
* \warning the coordinates must fit into the referenced triangular part
@@ -129,7 +132,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
{
return Product<OtherDerived,SelfAdjointView>(lhs.derived(),rhs);
}
-
+
friend EIGEN_DEVICE_FUNC
const SelfAdjointView<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,MatrixType,product),UpLo>
operator*(const Scalar& s, const SelfAdjointView& mat)
@@ -189,12 +192,24 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type(tmp2);
}
- typedef SelfAdjointView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;
+ typedef SelfAdjointView<const MatrixConjugateReturnType,UpLo> ConjugateReturnType;
/** \sa MatrixBase::conjugate() const */
EIGEN_DEVICE_FUNC
inline const ConjugateReturnType conjugate() const
{ return ConjugateReturnType(m_matrix.conjugate()); }
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
+ * returns \c *this otherwise.
+ */
+ template<bool Cond>
+ EIGEN_DEVICE_FUNC
+ inline typename internal::conditional<Cond,ConjugateReturnType,ConstSelfAdjointView>::type
+ conjugateIf() const
+ {
+ typedef typename internal::conditional<Cond,ConjugateReturnType,ConstSelfAdjointView>::type ReturnType;
+ return ReturnType(m_matrix.template conjugateIf<Cond>());
+ }
+
typedef SelfAdjointView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;
/** \sa MatrixBase::adjoint() const */
EIGEN_DEVICE_FUNC
@@ -285,17 +300,17 @@ protected:
using Base::m_src;
using Base::m_functor;
public:
-
+
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::AssignmentTraits AssignmentTraits;
-
-
+
+
EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
-
+
EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)
{
eigen_internal_assert(row!=col);
@@ -303,12 +318,12 @@ public:
m_functor.assignCoeff(m_dst.coeffRef(row,col), tmp);
m_functor.assignCoeff(m_dst.coeffRef(col,row), numext::conj(tmp));
}
-
+
EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)
{
Base::assignCoeff(id,id);
}
-
+
EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index)
{ eigen_internal_assert(false && "should never be called"); }
};
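A short sketch of the new conjugateIf<Cond>() member added above (illustrative only):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXcf m = MatrixXcf::Random(3, 3);
      auto s = m.selfadjointView<Lower>();
      auto conj = s.conjugateIf<true>();   // conjugated view
      auto asIs = s.conjugateIf<false>();  // ConstSelfAdjointView of *this
      (void)conj; (void)asIs;
      return 0;
    }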
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/SelfCwiseBinaryOp.h b/examples/ThirdPartyLibs/Eigen/src/Core/SelfCwiseBinaryOp.h
index 50099df82..7c89c2e23 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -17,7 +17,6 @@ namespace Eigen {
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>());
return derived();
}
@@ -25,7 +24,6 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(co
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>());
return derived();
}
@@ -33,7 +31,6 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(co
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>());
return derived();
}
@@ -41,7 +38,6 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(co
template<typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
- typedef typename Derived::PlainObject PlainObject;
internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>());
return derived();
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Solve.h b/examples/ThirdPartyLibs/Eigen/src/Core/Solve.h
index a8daea511..23d5cb707 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Solve.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Solve.h
@@ -13,13 +13,13 @@
namespace Eigen {
template<typename Decomposition, typename RhsType, typename StorageKind> class SolveImpl;
-
+
/** \class Solve
* \ingroup Core_Module
*
* \brief Pseudo expression representing a solving operation
*
- * \tparam Decomposition the type of the matrix or decomposion object
+ * \tparam Decomposition the type of the matrix or decomposition object
 * \tparam RhsType the type of the right-hand side
*
* This class represents an expression of A.solve(B)
@@ -64,13 +64,13 @@ class Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<R
public:
typedef typename internal::traits<Solve>::PlainObject PlainObject;
typedef typename internal::traits<Solve>::StorageIndex StorageIndex;
-
+
Solve(const Decomposition &dec, const RhsType &rhs)
: m_dec(dec), m_rhs(rhs)
{}
-
- EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_dec.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }
EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; }
@@ -87,14 +87,14 @@ class SolveImpl<Decomposition,RhsType,Dense>
: public MatrixBase<Solve<Decomposition,RhsType> >
{
typedef Solve<Decomposition,RhsType> Derived;
-
+
public:
-
+
typedef MatrixBase<Solve<Decomposition,RhsType> > Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
private:
-
+
Scalar coeff(Index row, Index col) const;
Scalar coeff(Index i) const;
};
@@ -119,15 +119,15 @@ struct evaluator<Solve<Decomposition,RhsType> >
typedef evaluator<PlainObject> Base;
enum { Flags = Base::Flags | EvalBeforeNestingBit };
-
+
EIGEN_DEVICE_FUNC explicit evaluator(const SolveType& solve)
: m_result(solve.rows(), solve.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
solve.dec()._solve_impl(solve.rhs(), m_result);
}
-
-protected:
+
+protected:
PlainObject m_result;
};
@@ -176,12 +176,12 @@ struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<t
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
-
+
src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);
}
};
-} // end namepsace internal
+} // end namespace internal
} // end namespace Eigen
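A minimal sketch of how the lazy Solve expression is consumed (assuming the standard decomposition API):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd A = MatrixXd::Random(5, 5);
      VectorXd b = VectorXd::Random(5);
      // solve() computes nothing by itself: it returns a Solve<> pseudo
      // expression whose evaluator (above) allocates m_result and runs
      // _solve_impl only when the expression is assigned.
      VectorXd x = A.partialPivLu().solve(b);
      (void)x;
      return 0;
    }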
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/SolveTriangular.h b/examples/ThirdPartyLibs/Eigen/src/Core/SolveTriangular.h
index a0011d4f9..dfbf99523 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/SolveTriangular.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/SolveTriangular.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_SOLVETRIANGULAR_H
#define EIGEN_SOLVETRIANGULAR_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -19,7 +19,7 @@ namespace internal {
template<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>
struct triangular_solve_vector;
-template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder, int OtherInnerStride>
struct triangular_solve_matrix;
// small helper struct extracting some traits on the underlying solver operation
@@ -54,7 +54,7 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
typedef blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::ExtractType ActualLhsType;
typedef Map<Matrix<RhsScalar,Dynamic,1>, Aligned> MappedRhs;
- static void run(const Lhs& lhs, Rhs& rhs)
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs)
{
ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
@@ -64,7 +64,7 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(),
(useRhsDirectly ? rhs.data() : 0));
-
+
if(!useRhsDirectly)
MappedRhs(actualRhs,rhs.size()) = rhs;
@@ -85,7 +85,7 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
typedef blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
- static void run(const Lhs& lhs, Rhs& rhs)
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs)
{
typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsProductTraits::extract(lhs);
@@ -98,8 +98,8 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
BlockingType blocking(rhs.rows(), rhs.cols(), size, 1, false);
triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,
- (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
- ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride(), blocking);
+ (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor, Rhs::InnerStrideAtCompileTime>
+ ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.innerStride(), rhs.outerStride(), blocking);
}
};
@@ -118,7 +118,7 @@ struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,false> {
DiagIndex = IsLower ? LoopIndex : Size - LoopIndex - 1,
StartIndex = IsLower ? 0 : DiagIndex+1
};
- static void run(const Lhs& lhs, Rhs& rhs)
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs)
{
if (LoopIndex>0)
rhs.coeffRef(DiagIndex) -= lhs.row(DiagIndex).template segment<LoopIndex>(StartIndex).transpose()
@@ -133,22 +133,22 @@ struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,false> {
template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>
struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,true> {
- static void run(const Lhs&, Rhs&) {}
+ static EIGEN_DEVICE_FUNC void run(const Lhs&, Rhs&) {}
};
template<typename Lhs, typename Rhs, int Mode>
struct triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,CompleteUnrolling,1> {
- static void run(const Lhs& lhs, Rhs& rhs)
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs)
{ triangular_solver_unroller<Lhs,Rhs,Mode,0,Rhs::SizeAtCompileTime>::run(lhs,rhs); }
};
template<typename Lhs, typename Rhs, int Mode>
struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
- static void run(const Lhs& lhs, Rhs& rhs)
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs)
{
Transpose<const Lhs> trLhs(lhs);
Transpose<Rhs> trRhs(rhs);
-
+
triangular_solver_unroller<Transpose<const Lhs>,Transpose<Rhs>,
((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs);
@@ -168,7 +168,10 @@ EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(c
{
OtherDerived& other = _other.const_cast_derived();
eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) );
- eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+ eigen_assert((!(int(Mode) & int(ZeroDiag))) && bool(int(Mode) & (int(Upper) | int(Lower))));
+ // If solving for a 0x0 matrix, nothing to do, simply return.
+ if (derived().cols() == 0)
+ return;
enum { copy = (internal::traits<OtherDerived>::Flags & RowMajorBit) && OtherDerived::IsVectorAtCompileTime && OtherDerived::SizeAtCompileTime!=1};
typedef typename internal::conditional<copy,
@@ -210,8 +213,8 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
: m_triangularMatrix(tri), m_rhs(rhs)
{}
- inline Index rows() const { return m_rhs.rows(); }
- inline Index cols() const { return m_rhs.cols(); }
+ inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_rhs.rows(); }
+ inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
template<typename Dest> inline void evalTo(Dest& dst) const
{
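A sketch of the in-place triangular solve these kernels back (assuming the documented TriangularView API):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd M = MatrixXd::Random(4, 4);
      VectorXd b = VectorXd::Random(4);
      // b is overwritten with the solution of L x = b, where L is the
      // lower-triangular part of M. The hunks above additionally forward
      // b's inner stride to the matrix kernel and return early on 0x0
      // systems.
      M.triangularView<Lower>().solveInPlace(b);
      return 0;
    }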
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/SolverBase.h b/examples/ThirdPartyLibs/Eigen/src/Core/SolverBase.h
index 8a4adc229..501461042 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/SolverBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/SolverBase.h
@@ -14,8 +14,35 @@ namespace Eigen {
namespace internal {
+template<typename Derived>
+struct solve_assertion {
+ template<bool Transpose_, typename Rhs>
+ static void run(const Derived& solver, const Rhs& b) { solver.template _check_solve_assertion<Transpose_>(b); }
+};
+
+template<typename Derived>
+struct solve_assertion<Transpose<Derived> >
+{
+ typedef Transpose<Derived> type;
+
+ template<bool Transpose_, typename Rhs>
+ static void run(const type& transpose, const Rhs& b)
+ {
+ internal::solve_assertion<typename internal::remove_all<Derived>::type>::template run<true>(transpose.nestedExpression(), b);
+ }
+};
+template<typename Scalar, typename Derived>
+struct solve_assertion<CwiseUnaryOp<Eigen::internal::scalar_conjugate_op<Scalar>, const Transpose<Derived> > >
+{
+ typedef CwiseUnaryOp<Eigen::internal::scalar_conjugate_op<Scalar>, const Transpose<Derived> > type;
+ template<bool Transpose_, typename Rhs>
+ static void run(const type& adjoint, const Rhs& b)
+ {
+ internal::solve_assertion<typename internal::remove_all<Transpose<Derived> >::type>::template run<true>(adjoint.nestedExpression(), b);
+ }
+};
} // end namespace internal
/** \class SolverBase
@@ -35,7 +62,7 @@ namespace internal {
*
 * \warning Currently, any other usage of transpose() and adjoint() is not supported and will produce compilation errors.
*
- * \sa class PartialPivLU, class FullPivLU
+ * \sa class PartialPivLU, class FullPivLU, class HouseholderQR, class ColPivHouseholderQR, class FullPivHouseholderQR, class CompleteOrthogonalDecomposition, class LLT, class LDLT, class SVDBase
*/
template<typename Derived>
class SolverBase : public EigenBase<Derived>
@@ -46,6 +73,9 @@ class SolverBase : public EigenBase<Derived>
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef Scalar CoeffReturnType;
+ template<typename Derived_>
+ friend struct internal::solve_assertion;
+
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
@@ -56,7 +86,8 @@ class SolverBase : public EigenBase<Derived>
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
- || internal::traits<Derived>::MaxColsAtCompileTime == 1
+ || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2
};
/** Default constructor */
@@ -74,7 +105,7 @@ class SolverBase : public EigenBase<Derived>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
+ internal::solve_assertion<typename internal::remove_all<Derived>::type>::template run<false>(derived(), b);
return Solve<Derived, Rhs>(derived(), b.derived());
}
@@ -112,6 +143,13 @@ class SolverBase : public EigenBase<Derived>
}
protected:
+
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ eigen_assert(derived().m_isInitialized && "Solver is not initialized.");
+ eigen_assert((Transpose_?derived().cols():derived().rows())==b.rows() && "SolverBase::solve(): invalid number of rows of the right hand side matrix b");
+ }
};
namespace internal {
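A minimal sketch of the solve paths the new assertion helpers guard (assuming the Eigen 3.4 SolverBase API; PartialPivLU is shown, but any SolverBase-derived decomposition behaves the same):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXcd A = MatrixXcd::Random(4, 4);
      VectorXcd b = VectorXcd::Random(4);
      PartialPivLU<MatrixXcd> lu(A);
      // The transposed variants route through solve_assertion above,
      // which checks cols() instead of rows() against b.
      VectorXcd x1 = lu.solve(b);              // A   x = b
      VectorXcd x2 = lu.transpose().solve(b);  // A^T x = b
      VectorXcd x3 = lu.adjoint().solve(b);    // A^H x = b
      (void)x1; (void)x2; (void)x3;
      return 0;
    }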
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/StableNorm.h b/examples/ThirdPartyLibs/Eigen/src/Core/StableNorm.h
index be04ed44d..4a3f0cca8 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/StableNorm.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/StableNorm.h
@@ -50,6 +50,71 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
ssq += (bl*invScale).squaredNorm();
}
+template<typename VectorType, typename RealScalar>
+void stable_norm_impl_inner_step(const VectorType &vec, RealScalar& ssq, RealScalar& scale, RealScalar& invScale)
+{
+ typedef typename VectorType::Scalar Scalar;
+ const Index blockSize = 4096;
+
+ typedef typename internal::nested_eval<VectorType,2>::type VectorTypeCopy;
+ typedef typename internal::remove_all<VectorTypeCopy>::type VectorTypeCopyClean;
+ const VectorTypeCopy copy(vec);
+
+ enum {
+ CanAlign = ( (int(VectorTypeCopyClean::Flags)&DirectAccessBit)
+ || (int(internal::evaluator<VectorTypeCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
+ ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)
+ && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization
+ };
+ typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<VectorTypeCopyClean>::Alignment>,
+ typename VectorTypeCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
+ Index n = vec.size();
+
+ Index bi = internal::first_default_aligned(copy);
+ if (bi>0)
+ internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
+ for (; bi<n; bi+=blockSize)
+ internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
+}
+
+template<typename VectorType>
+typename VectorType::RealScalar
+stable_norm_impl(const VectorType &vec, typename enable_if<VectorType::IsVectorAtCompileTime>::type* = 0 )
+{
+ using std::sqrt;
+ using std::abs;
+
+ Index n = vec.size();
+
+ if(n==1)
+ return abs(vec.coeff(0));
+
+ typedef typename VectorType::RealScalar RealScalar;
+ RealScalar scale(0);
+ RealScalar invScale(1);
+ RealScalar ssq(0); // sum of squares
+
+ stable_norm_impl_inner_step(vec, ssq, scale, invScale);
+
+ return scale * sqrt(ssq);
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar
+stable_norm_impl(const MatrixType &mat, typename enable_if<!MatrixType::IsVectorAtCompileTime>::type* = 0 )
+{
+ using std::sqrt;
+
+ typedef typename MatrixType::RealScalar RealScalar;
+ RealScalar scale(0);
+ RealScalar invScale(1);
+ RealScalar ssq(0); // sum of squares
+
+ for(Index j=0; j<mat.outerSize(); ++j)
+ stable_norm_impl_inner_step(mat.innerVector(j), ssq, scale, invScale);
+ return scale * sqrt(ssq);
+}
+
template<typename Derived>
inline typename NumTraits<typename traits<Derived>::Scalar>::Real
blueNorm_impl(const EigenBase<Derived>& _vec)
@@ -58,52 +123,43 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
using std::pow;
using std::sqrt;
using std::abs;
+
+ // This program calculates the machine-dependent constants
+  // b1, b2, s1m, s2m, relerr, overfl
+ // from the "basic" machine-dependent numbers
+ // nbig, ibeta, it, iemin, iemax, rbig.
+ // The following define the basic machine-dependent constants.
+  // For portability, the PORT subprograms "i1mach" and "r1mach"
+ // are used. For any specific computer, each of the assignment
+ // statements can be replaced
+ static const int ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
+ static const int it = NumTraits<RealScalar>::digits(); // number of base-beta digits in mantissa
+ static const int iemin = NumTraits<RealScalar>::min_exponent(); // minimum exponent
+ static const int iemax = NumTraits<RealScalar>::max_exponent(); // maximum exponent
+ static const RealScalar rbig = NumTraits<RealScalar>::highest(); // largest floating-point number
+ static const RealScalar b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(-((1-iemin)/2)))); // lower boundary of midrange
+ static const RealScalar b2 = RealScalar(pow(RealScalar(ibeta),RealScalar((iemax + 1 - it)/2))); // upper boundary of midrange
+ static const RealScalar s1m = RealScalar(pow(RealScalar(ibeta),RealScalar((2-iemin)/2))); // scaling factor for lower range
+ static const RealScalar s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(- ((iemax+it)/2)))); // scaling factor for upper range
+ static const RealScalar eps = RealScalar(pow(double(ibeta), 1-it));
+ static const RealScalar relerr = sqrt(eps); // tolerance for neglecting asml
+
const Derived& vec(_vec.derived());
- static bool initialized = false;
- static RealScalar b1, b2, s1m, s2m, rbig, relerr;
- if(!initialized)
- {
- int ibeta, it, iemin, iemax, iexp;
- RealScalar eps;
- // This program calculates the machine-dependent constants
- // bl, b2, slm, s2m, relerr overfl
- // from the "basic" machine-dependent numbers
- // nbig, ibeta, it, iemin, iemax, rbig.
- // The following define the basic machine-dependent constants.
- // For portability, the PORT subprograms "ilmaeh" and "rlmach"
- // are used. For any specific computer, each of the assignment
- // statements can be replaced
- ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
- it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
- iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
- iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
- rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
-
- iexp = -((1-iemin)/2);
- b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
- iexp = (iemax + 1 - it)/2;
- b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
-
- iexp = (2-iemin)/2;
- s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
- iexp = - ((iemax+it)/2);
- s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
-
- eps = RealScalar(pow(double(ibeta), 1-it));
- relerr = sqrt(eps); // tolerance for neglecting asml
- initialized = true;
- }
Index n = vec.size();
RealScalar ab2 = b2 / RealScalar(n);
RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0);
- for(typename Derived::InnerIterator it(vec, 0); it; ++it)
+
+ for(Index j=0; j<vec.outerSize(); ++j)
{
- RealScalar ax = abs(it.value());
- if(ax > ab2) abig += numext::abs2(ax*s2m);
- else if(ax < b1) asml += numext::abs2(ax*s1m);
- else amed += numext::abs2(ax);
+ for(typename Derived::InnerIterator iter(vec, j); iter; ++iter)
+ {
+ RealScalar ax = abs(iter.value());
+ if(ax > ab2) abig += numext::abs2(ax*s2m);
+ else if(ax < b1) asml += numext::abs2(ax*s1m);
+ else amed += numext::abs2(ax);
+ }
}
if(amed!=amed)
return amed; // we got a NaN
@@ -156,36 +212,7 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
- using std::sqrt;
- using std::abs;
- const Index blockSize = 4096;
- RealScalar scale(0);
- RealScalar invScale(1);
- RealScalar ssq(0); // sum of square
-
- typedef typename internal::nested_eval<Derived,2>::type DerivedCopy;
- typedef typename internal::remove_all<DerivedCopy>::type DerivedCopyClean;
- DerivedCopy copy(derived());
-
- enum {
- CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit)
- || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
- ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)
- && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization
- };
- typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>,
- typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
- Index n = size();
-
- if(n==1)
- return abs(this->coeff(0));
-
- Index bi = internal::first_default_aligned(copy);
- if (bi>0)
- internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);
- for (; bi<n; bi+=blockSize)
- internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);
- return scale * sqrt(ssq);
+ return internal::stable_norm_impl(derived());
}
/** \returns the \em l2 norm of \c *this using the Blue's algorithm.
@@ -213,7 +240,10 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
MatrixBase<Derived>::hypotNorm() const
{
- return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
+ if(size()==1)
+ return numext::abs(coeff(0,0));
+ else
+ return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
}
} // end namespace Eigen
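A sketch contrasting the norm variants touched above (values are approximate; assuming the standard MatrixBase norm members):

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main() {
      // Coefficients this large overflow the intermediate sum of squares
      // in the plain norm(), while the scaled implementations survive.
      Vector3d v = Vector3d::Constant(1e155);
      std::cout << v.norm()       << "\n";  // inf
      std::cout << v.stableNorm() << "\n";  // ~1.7320508e155
      std::cout << v.blueNorm()   << "\n";  // ~1.7320508e155
      std::cout << v.hypotNorm()  << "\n";  // ~1.7320508e155
      return 0;
    }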
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/StlIterators.h b/examples/ThirdPartyLibs/Eigen/src/Core/StlIterators.h
new file mode 100644
index 000000000..09041db1d
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/StlIterators.h
@@ -0,0 +1,463 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STLITERATORS_H
+#define EIGEN_STLITERATORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename IteratorType>
+struct indexed_based_stl_iterator_traits;
+
+template<typename Derived>
+class indexed_based_stl_iterator_base
+{
+protected:
+ typedef indexed_based_stl_iterator_traits<Derived> traits;
+ typedef typename traits::XprType XprType;
+ typedef indexed_based_stl_iterator_base<typename traits::non_const_iterator> non_const_iterator;
+ typedef indexed_based_stl_iterator_base<typename traits::const_iterator> const_iterator;
+ typedef typename internal::conditional<internal::is_const<XprType>::value,non_const_iterator,const_iterator>::type other_iterator;
+ // NOTE: in C++03 we cannot declare friend classes through typedefs because we need to write friend class:
+ friend class indexed_based_stl_iterator_base<typename traits::const_iterator>;
+ friend class indexed_based_stl_iterator_base<typename traits::non_const_iterator>;
+public:
+ typedef Index difference_type;
+ typedef std::random_access_iterator_tag iterator_category;
+
+ indexed_based_stl_iterator_base() EIGEN_NO_THROW : mp_xpr(0), m_index(0) {}
+ indexed_based_stl_iterator_base(XprType& xpr, Index index) EIGEN_NO_THROW : mp_xpr(&xpr), m_index(index) {}
+
+ indexed_based_stl_iterator_base(const non_const_iterator& other) EIGEN_NO_THROW
+ : mp_xpr(other.mp_xpr), m_index(other.m_index)
+ {}
+
+ indexed_based_stl_iterator_base& operator=(const non_const_iterator& other)
+ {
+ mp_xpr = other.mp_xpr;
+ m_index = other.m_index;
+ return *this;
+ }
+
+ Derived& operator++() { ++m_index; return derived(); }
+ Derived& operator--() { --m_index; return derived(); }
+
+ Derived operator++(int) { Derived prev(derived()); operator++(); return prev;}
+ Derived operator--(int) { Derived prev(derived()); operator--(); return prev;}
+
+ friend Derived operator+(const indexed_based_stl_iterator_base& a, Index b) { Derived ret(a.derived()); ret += b; return ret; }
+ friend Derived operator-(const indexed_based_stl_iterator_base& a, Index b) { Derived ret(a.derived()); ret -= b; return ret; }
+ friend Derived operator+(Index a, const indexed_based_stl_iterator_base& b) { Derived ret(b.derived()); ret += a; return ret; }
+ friend Derived operator-(Index a, const indexed_based_stl_iterator_base& b) { Derived ret(b.derived()); ret -= a; return ret; }
+
+ Derived& operator+=(Index b) { m_index += b; return derived(); }
+ Derived& operator-=(Index b) { m_index -= b; return derived(); }
+
+ difference_type operator-(const indexed_based_stl_iterator_base& other) const
+ {
+ eigen_assert(mp_xpr == other.mp_xpr);
+ return m_index - other.m_index;
+ }
+
+ difference_type operator-(const other_iterator& other) const
+ {
+ eigen_assert(mp_xpr == other.mp_xpr);
+ return m_index - other.m_index;
+ }
+
+ bool operator==(const indexed_based_stl_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index == other.m_index; }
+ bool operator!=(const indexed_based_stl_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index != other.m_index; }
+ bool operator< (const indexed_based_stl_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index < other.m_index; }
+ bool operator<=(const indexed_based_stl_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index <= other.m_index; }
+ bool operator> (const indexed_based_stl_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index > other.m_index; }
+ bool operator>=(const indexed_based_stl_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index >= other.m_index; }
+
+ bool operator==(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index == other.m_index; }
+ bool operator!=(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index != other.m_index; }
+ bool operator< (const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index < other.m_index; }
+ bool operator<=(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index <= other.m_index; }
+ bool operator> (const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index > other.m_index; }
+ bool operator>=(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index >= other.m_index; }
+
+protected:
+
+ Derived& derived() { return static_cast<Derived&>(*this); }
+ const Derived& derived() const { return static_cast<const Derived&>(*this); }
+
+ XprType *mp_xpr;
+ Index m_index;
+};
+
+template<typename Derived>
+class indexed_based_stl_reverse_iterator_base
+{
+protected:
+ typedef indexed_based_stl_iterator_traits<Derived> traits;
+ typedef typename traits::XprType XprType;
+ typedef indexed_based_stl_reverse_iterator_base<typename traits::non_const_iterator> non_const_iterator;
+ typedef indexed_based_stl_reverse_iterator_base<typename traits::const_iterator> const_iterator;
+ typedef typename internal::conditional<internal::is_const<XprType>::value,non_const_iterator,const_iterator>::type other_iterator;
+ // NOTE: in C++03 friend classes cannot be declared through typedefs, so the "friend class" declarations below spell out the full types:
+ friend class indexed_based_stl_reverse_iterator_base<typename traits::const_iterator>;
+ friend class indexed_based_stl_reverse_iterator_base<typename traits::non_const_iterator>;
+public:
+ typedef Index difference_type;
+ typedef std::random_access_iterator_tag iterator_category;
+
+ indexed_based_stl_reverse_iterator_base() : mp_xpr(0), m_index(0) {}
+ indexed_based_stl_reverse_iterator_base(XprType& xpr, Index index) : mp_xpr(&xpr), m_index(index) {}
+
+ indexed_based_stl_reverse_iterator_base(const non_const_iterator& other)
+ : mp_xpr(other.mp_xpr), m_index(other.m_index)
+ {}
+
+ indexed_based_stl_reverse_iterator_base& operator=(const non_const_iterator& other)
+ {
+ mp_xpr = other.mp_xpr;
+ m_index = other.m_index;
+ return *this;
+ }
+
+ Derived& operator++() { --m_index; return derived(); }
+ Derived& operator--() { ++m_index; return derived(); }
+
+ Derived operator++(int) { Derived prev(derived()); operator++(); return prev;}
+ Derived operator--(int) { Derived prev(derived()); operator--(); return prev;}
+
+ friend Derived operator+(const indexed_based_stl_reverse_iterator_base& a, Index b) { Derived ret(a.derived()); ret += b; return ret; }
+ friend Derived operator-(const indexed_based_stl_reverse_iterator_base& a, Index b) { Derived ret(a.derived()); ret -= b; return ret; }
+ friend Derived operator+(Index a, const indexed_based_stl_reverse_iterator_base& b) { Derived ret(b.derived()); ret += a; return ret; }
+ friend Derived operator-(Index a, const indexed_based_stl_reverse_iterator_base& b) { Derived ret(b.derived()); ret -= a; return ret; }
+
+ Derived& operator+=(Index b) { m_index -= b; return derived(); }
+ Derived& operator-=(Index b) { m_index += b; return derived(); }
+
+ difference_type operator-(const indexed_based_stl_reverse_iterator_base& other) const
+ {
+ eigen_assert(mp_xpr == other.mp_xpr);
+ return other.m_index - m_index;
+ }
+
+ difference_type operator-(const other_iterator& other) const
+ {
+ eigen_assert(mp_xpr == other.mp_xpr);
+ return other.m_index - m_index;
+ }
+
+ bool operator==(const indexed_based_stl_reverse_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index == other.m_index; }
+ bool operator!=(const indexed_based_stl_reverse_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index != other.m_index; }
+ bool operator< (const indexed_based_stl_reverse_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index > other.m_index; }
+ bool operator<=(const indexed_based_stl_reverse_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index >= other.m_index; }
+ bool operator> (const indexed_based_stl_reverse_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index < other.m_index; }
+ bool operator>=(const indexed_based_stl_reverse_iterator_base& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index <= other.m_index; }
+
+ bool operator==(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index == other.m_index; }
+ bool operator!=(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index != other.m_index; }
+ bool operator< (const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index > other.m_index; }
+ bool operator<=(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index >= other.m_index; }
+ bool operator> (const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index < other.m_index; }
+ bool operator>=(const other_iterator& other) const { eigen_assert(mp_xpr == other.mp_xpr); return m_index <= other.m_index; }
+
+protected:
+
+ Derived& derived() { return static_cast<Derived&>(*this); }
+ const Derived& derived() const { return static_cast<const Derived&>(*this); }
+
+ XprType *mp_xpr;
+ Index m_index;
+};
+
+template<typename XprType>
+class pointer_based_stl_iterator
+{
+ enum { is_lvalue = internal::is_lvalue<XprType>::value };
+ typedef pointer_based_stl_iterator<typename internal::remove_const<XprType>::type> non_const_iterator;
+ typedef pointer_based_stl_iterator<typename internal::add_const<XprType>::type> const_iterator;
+ typedef typename internal::conditional<internal::is_const<XprType>::value,non_const_iterator,const_iterator>::type other_iterator;
+ // NOTE: in C++03 friend classes cannot be declared through typedefs, so the "friend class" declarations below spell out the full types:
+ friend class pointer_based_stl_iterator<typename internal::add_const<XprType>::type>;
+ friend class pointer_based_stl_iterator<typename internal::remove_const<XprType>::type>;
+public:
+ typedef Index difference_type;
+ typedef typename XprType::Scalar value_type;
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef typename internal::conditional<bool(is_lvalue), value_type*, const value_type*>::type pointer;
+ typedef typename internal::conditional<bool(is_lvalue), value_type&, const value_type&>::type reference;
+
+
+ pointer_based_stl_iterator() EIGEN_NO_THROW : m_ptr(0) {}
+ pointer_based_stl_iterator(XprType& xpr, Index index) EIGEN_NO_THROW : m_incr(xpr.innerStride())
+ {
+ m_ptr = xpr.data() + index * m_incr.value();
+ }
+
+ pointer_based_stl_iterator(const non_const_iterator& other) EIGEN_NO_THROW
+ : m_ptr(other.m_ptr), m_incr(other.m_incr)
+ {}
+
+ pointer_based_stl_iterator& operator=(const non_const_iterator& other) EIGEN_NO_THROW
+ {
+ m_ptr = other.m_ptr;
+ m_incr.setValue(other.m_incr);
+ return *this;
+ }
+
+ reference operator*() const { return *m_ptr; }
+ reference operator[](Index i) const { return *(m_ptr+i*m_incr.value()); }
+ pointer operator->() const { return m_ptr; }
+
+ pointer_based_stl_iterator& operator++() { m_ptr += m_incr.value(); return *this; }
+ pointer_based_stl_iterator& operator--() { m_ptr -= m_incr.value(); return *this; }
+
+ pointer_based_stl_iterator operator++(int) { pointer_based_stl_iterator prev(*this); operator++(); return prev;}
+ pointer_based_stl_iterator operator--(int) { pointer_based_stl_iterator prev(*this); operator--(); return prev;}
+
+ friend pointer_based_stl_iterator operator+(const pointer_based_stl_iterator& a, Index b) { pointer_based_stl_iterator ret(a); ret += b; return ret; }
+ friend pointer_based_stl_iterator operator-(const pointer_based_stl_iterator& a, Index b) { pointer_based_stl_iterator ret(a); ret -= b; return ret; }
+ friend pointer_based_stl_iterator operator+(Index a, const pointer_based_stl_iterator& b) { pointer_based_stl_iterator ret(b); ret += a; return ret; }
+ friend pointer_based_stl_iterator operator-(Index a, const pointer_based_stl_iterator& b) { pointer_based_stl_iterator ret(b); ret -= a; return ret; }
+
+ pointer_based_stl_iterator& operator+=(Index b) { m_ptr += b*m_incr.value(); return *this; }
+ pointer_based_stl_iterator& operator-=(Index b) { m_ptr -= b*m_incr.value(); return *this; }
+
+ difference_type operator-(const pointer_based_stl_iterator& other) const {
+ return (m_ptr - other.m_ptr)/m_incr.value();
+ }
+
+ difference_type operator-(const other_iterator& other) const {
+ return (m_ptr - other.m_ptr)/m_incr.value();
+ }
+
+ bool operator==(const pointer_based_stl_iterator& other) const { return m_ptr == other.m_ptr; }
+ bool operator!=(const pointer_based_stl_iterator& other) const { return m_ptr != other.m_ptr; }
+ bool operator< (const pointer_based_stl_iterator& other) const { return m_ptr < other.m_ptr; }
+ bool operator<=(const pointer_based_stl_iterator& other) const { return m_ptr <= other.m_ptr; }
+ bool operator> (const pointer_based_stl_iterator& other) const { return m_ptr > other.m_ptr; }
+ bool operator>=(const pointer_based_stl_iterator& other) const { return m_ptr >= other.m_ptr; }
+
+ bool operator==(const other_iterator& other) const { return m_ptr == other.m_ptr; }
+ bool operator!=(const other_iterator& other) const { return m_ptr != other.m_ptr; }
+ bool operator< (const other_iterator& other) const { return m_ptr < other.m_ptr; }
+ bool operator<=(const other_iterator& other) const { return m_ptr <= other.m_ptr; }
+ bool operator> (const other_iterator& other) const { return m_ptr > other.m_ptr; }
+ bool operator>=(const other_iterator& other) const { return m_ptr >= other.m_ptr; }
+
+protected:
+
+ pointer m_ptr;
+ internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_incr;
+};
+
+template<typename _XprType>
+struct indexed_based_stl_iterator_traits<generic_randaccess_stl_iterator<_XprType> >
+{
+ typedef _XprType XprType;
+ typedef generic_randaccess_stl_iterator<typename internal::remove_const<XprType>::type> non_const_iterator;
+ typedef generic_randaccess_stl_iterator<typename internal::add_const<XprType>::type> const_iterator;
+};
+
+template<typename XprType>
+class generic_randaccess_stl_iterator : public indexed_based_stl_iterator_base<generic_randaccess_stl_iterator<XprType> >
+{
+public:
+ typedef typename XprType::Scalar value_type;
+
+protected:
+
+ enum {
+ has_direct_access = (internal::traits<XprType>::Flags & DirectAccessBit) ? 1 : 0,
+ is_lvalue = internal::is_lvalue<XprType>::value
+ };
+
+ typedef indexed_based_stl_iterator_base<generic_randaccess_stl_iterator> Base;
+ using Base::m_index;
+ using Base::mp_xpr;
+
+ // TODO: currently const Transpose/Reshape expressions never return const references,
+ // so let's return by value too.
+ //typedef typename internal::conditional<bool(has_direct_access), const value_type&, const value_type>::type read_only_ref_t;
+ typedef const value_type read_only_ref_t;
+
+public:
+
+ typedef typename internal::conditional<bool(is_lvalue), value_type *, const value_type *>::type pointer;
+ typedef typename internal::conditional<bool(is_lvalue), value_type&, read_only_ref_t>::type reference;
+
+ generic_randaccess_stl_iterator() : Base() {}
+ generic_randaccess_stl_iterator(XprType& xpr, Index index) : Base(xpr,index) {}
+ generic_randaccess_stl_iterator(const typename Base::non_const_iterator& other) : Base(other) {}
+ using Base::operator=;
+
+ reference operator*() const { return (*mp_xpr)(m_index); }
+ reference operator[](Index i) const { return (*mp_xpr)(m_index+i); }
+ pointer operator->() const { return &((*mp_xpr)(m_index)); }
+};
+
+template<typename _XprType, DirectionType Direction>
+struct indexed_based_stl_iterator_traits<subvector_stl_iterator<_XprType,Direction> >
+{
+ typedef _XprType XprType;
+ typedef subvector_stl_iterator<typename internal::remove_const<XprType>::type, Direction> non_const_iterator;
+ typedef subvector_stl_iterator<typename internal::add_const<XprType>::type, Direction> const_iterator;
+};
+
+template<typename XprType, DirectionType Direction>
+class subvector_stl_iterator : public indexed_based_stl_iterator_base<subvector_stl_iterator<XprType,Direction> >
+{
+protected:
+
+ enum { is_lvalue = internal::is_lvalue<XprType>::value };
+
+ typedef indexed_based_stl_iterator_base<subvector_stl_iterator> Base;
+ using Base::m_index;
+ using Base::mp_xpr;
+
+ typedef typename internal::conditional<Direction==Vertical,typename XprType::ColXpr,typename XprType::RowXpr>::type SubVectorType;
+ typedef typename internal::conditional<Direction==Vertical,typename XprType::ConstColXpr,typename XprType::ConstRowXpr>::type ConstSubVectorType;
+
+
+public:
+ typedef typename internal::conditional<bool(is_lvalue), SubVectorType, ConstSubVectorType>::type reference;
+ typedef typename reference::PlainObject value_type;
+
+private:
+ class subvector_stl_iterator_ptr
+ {
+ public:
+ subvector_stl_iterator_ptr(const reference &subvector) : m_subvector(subvector) {}
+ reference* operator->() { return &m_subvector; }
+ private:
+ reference m_subvector;
+ };
+public:
+
+ typedef subvector_stl_iterator_ptr pointer;
+
+ subvector_stl_iterator() : Base() {}
+ subvector_stl_iterator(XprType& xpr, Index index) : Base(xpr,index) {}
+
+ reference operator*() const { return (*mp_xpr).template subVector<Direction>(m_index); }
+ reference operator[](Index i) const { return (*mp_xpr).template subVector<Direction>(m_index+i); }
+ pointer operator->() const { return (*mp_xpr).template subVector<Direction>(m_index); }
+};
+
+template<typename _XprType, DirectionType Direction>
+struct indexed_based_stl_iterator_traits<subvector_stl_reverse_iterator<_XprType,Direction> >
+{
+ typedef _XprType XprType;
+ typedef subvector_stl_reverse_iterator<typename internal::remove_const<XprType>::type, Direction> non_const_iterator;
+ typedef subvector_stl_reverse_iterator<typename internal::add_const<XprType>::type, Direction> const_iterator;
+};
+
+template<typename XprType, DirectionType Direction>
+class subvector_stl_reverse_iterator : public indexed_based_stl_reverse_iterator_base<subvector_stl_reverse_iterator<XprType,Direction> >
+{
+protected:
+
+ enum { is_lvalue = internal::is_lvalue<XprType>::value };
+
+ typedef indexed_based_stl_reverse_iterator_base<subvector_stl_reverse_iterator> Base;
+ using Base::m_index;
+ using Base::mp_xpr;
+
+ typedef typename internal::conditional<Direction==Vertical,typename XprType::ColXpr,typename XprType::RowXpr>::type SubVectorType;
+ typedef typename internal::conditional<Direction==Vertical,typename XprType::ConstColXpr,typename XprType::ConstRowXpr>::type ConstSubVectorType;
+
+
+public:
+ typedef typename internal::conditional<bool(is_lvalue), SubVectorType, ConstSubVectorType>::type reference;
+ typedef typename reference::PlainObject value_type;
+
+private:
+ class subvector_stl_reverse_iterator_ptr
+ {
+ public:
+ subvector_stl_reverse_iterator_ptr(const reference &subvector) : m_subvector(subvector) {}
+ reference* operator->() { return &m_subvector; }
+ private:
+ reference m_subvector;
+ };
+public:
+
+ typedef subvector_stl_reverse_iterator_ptr pointer;
+
+ subvector_stl_reverse_iterator() : Base() {}
+ subvector_stl_reverse_iterator(XprType& xpr, Index index) : Base(xpr,index) {}
+
+ reference operator*() const { return (*mp_xpr).template subVector<Direction>(m_index); }
+ reference operator[](Index i) const { return (*mp_xpr).template subVector<Direction>(m_index+i); }
+ pointer operator->() const { return (*mp_xpr).template subVector<Direction>(m_index); }
+};
+
+} // namespace internal
+
+
+/** returns an iterator to the first element of the 1D vector or array
+ * \only_for_vectors
+ * \sa end(), cbegin()
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::iterator DenseBase<Derived>::begin()
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ return iterator(derived(), 0);
+}
+
+/** const version of begin() */
+template<typename Derived>
+inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::begin() const
+{
+ return cbegin();
+}
+
+/** returns a read-only const_iterator to the first element of the 1D vector or array
+ * \only_for_vectors
+ * \sa cend(), begin()
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::cbegin() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ return const_iterator(derived(), 0);
+}
+
+/** returns an iterator to the element following the last element of the 1D vector or array
+ * \only_for_vectors
+ * \sa begin(), cend()
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::iterator DenseBase<Derived>::end()
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ return iterator(derived(), size());
+}
+
+/** const version of end() */
+template<typename Derived>
+inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::end() const
+{
+ return cend();
+}
+
+/** returns a read-only const_iterator to the element following the last element of the 1D vector or array
+ * \only_for_vectors
+ * \sa begin(), cend()
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::cend() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ return const_iterator(derived(), size());
+}
+
+} // namespace Eigen
+
+#endif // EIGEN_STLITERATORS_H
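A minimal usage sketch (not part of the patch) of what this new header enables, assuming the DenseBase::begin()/end()/cbegin()/cend() members defined above: 1D vectors and arrays become usable with range-for loops and STL algorithms.

    #include <algorithm>
    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::VectorXd v(5);
      v << 3, 1, 4, 1, 5;
      // Random-access iterators from StlIterators.h (vectors only).
      std::sort(v.begin(), v.end());
      for (double x : v) std::cout << x << ' ';  // prints: 1 1 3 4 5
      std::cout << '\n';
    }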
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Stride.h b/examples/ThirdPartyLibs/Eigen/src/Core/Stride.h
index 513742f34..d164e5399 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Stride.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Stride.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_STRIDE_H
#define EIGEN_STRIDE_H
-namespace Eigen {
+namespace Eigen {
/** \class Stride
* \ingroup Core_Module
@@ -38,6 +38,14 @@ namespace Eigen {
* \include Map_general_stride.cpp
* Output: \verbinclude Map_general_stride.out
*
+ * Both strides can be negative. However, a negative stride of -1 cannot be specified at compile time
+ * because of the ambiguity with Dynamic, which is defined as -1 (historically, negative strides were
+ * not allowed).
+ *
+ * Note that for compile-time vectors (ColsAtCompileTime==1 or RowsAtCompileTime==1),
+ * the inner stride is the pointer increment between two consecutive elements,
+ * regardless of storage layout.
+ *
* \sa class InnerStride, class OuterStride, \ref TopicStorageOrders
*/
template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
@@ -55,6 +63,8 @@ class Stride
Stride()
: m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime)
{
+ // FIXME: for Eigen 4 we should use DynamicIndex instead of Dynamic.
+ // FIXME: for Eigen 4 we should also unify this API with fix<>
eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic);
}
@@ -63,7 +73,6 @@ class Stride
Stride(Index outerStride, Index innerStride)
: m_outer(outerStride), m_inner(innerStride)
{
- eigen_assert(innerStride>=0 && outerStride>=0);
}
/** Copy constructor */
@@ -73,10 +82,10 @@ class Stride
{}
/** \returns the outer stride */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index outer() const { return m_outer.value(); }
/** \returns the inner stride */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
inline Index inner() const { return m_inner.value(); }
protected:
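A short sketch (not part of the patch) of what the relaxed runtime assert above permits: Map strides constructed at run time may now be negative, while a compile-time stride of -1 stays unavailable because of the ambiguity with Dynamic, as the new documentation notes.

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      double data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      // Every other element, forward: 0 2 4 6
      Eigen::Map<Eigen::VectorXd, 0, Eigen::InnerStride<Eigen::Dynamic> >
          fwd(data, 4, Eigen::InnerStride<Eigen::Dynamic>(2));
      // Every other element, backward (negative runtime stride): 7 5 3 1
      Eigen::Map<Eigen::VectorXd, 0, Eigen::InnerStride<Eigen::Dynamic> >
          bwd(data + 7, 4, Eigen::InnerStride<Eigen::Dynamic>(-2));
      std::cout << fwd.transpose() << '\n' << bwd.transpose() << '\n';
    }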
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Swap.h b/examples/ThirdPartyLibs/Eigen/src/Core/Swap.h
index d70200918..180a4e5ad 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Swap.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Swap.h
@@ -30,12 +30,13 @@ public:
typedef typename Base::DstXprType DstXprType;
typedef swap_assign_op<Scalar> Functor;
- EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
template<int StoreMode, int LoadMode, typename PacketType>
- void assignPacket(Index row, Index col)
+ EIGEN_STRONG_INLINE void assignPacket(Index row, Index col)
{
PacketType tmp = m_src.template packet<LoadMode,PacketType>(row,col);
const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(row,col, m_dst.template packet<StoreMode,PacketType>(row,col));
@@ -43,7 +44,7 @@ public:
}
template<int StoreMode, int LoadMode, typename PacketType>
- void assignPacket(Index index)
+ EIGEN_STRONG_INLINE void assignPacket(Index index)
{
PacketType tmp = m_src.template packet<LoadMode,PacketType>(index);
const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(index, m_dst.template packet<StoreMode,PacketType>(index));
@@ -52,7 +53,7 @@ public:
// TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I mean no CRTP (Gael)
template<int StoreMode, int LoadMode, typename PacketType>
- void assignPacketByOuterInner(Index outer, Index inner)
+ EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner)
{
Index row = Base::rowIndexByOuterInner(outer, inner);
Index col = Base::colIndexByOuterInner(outer, inner);
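For context (not part of the patch): the kernel above backs DenseBase::swap(), which exchanges coefficients packet-wise where possible, without allocating a temporary.

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::Matrix2d a, b;
      a << 1, 2, 3, 4;
      b << 5, 6, 7, 8;
      a.swap(b);  // element/packet-wise exchange through the swap kernel
      std::cout << a << "\n\n" << b << '\n';
    }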
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Transpose.h b/examples/ThirdPartyLibs/Eigen/src/Core/Transpose.h
index ba7d6e629..2bc658f40 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Transpose.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Transpose.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_TRANSPOSE_H
#define EIGEN_TRANSPOSE_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename MatrixType>
@@ -61,24 +61,27 @@ template<typename MatrixType> class Transpose
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
EIGEN_DEVICE_FUNC
- explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
+ explicit EIGEN_STRONG_INLINE Transpose(MatrixType& matrix) : m_matrix(matrix) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
/** \returns the nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
/** \returns the nested expression */
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename internal::remove_reference<MatrixTypeNested>::type&
nestedExpression() { return m_matrix; }
/** \internal */
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void resize(Index nrows, Index ncols) {
m_matrix.resize(ncols,nrows);
}
@@ -122,8 +125,10 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
- EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
- EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index innerStride() const { return derived().nestedExpression().innerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const { return derived().nestedExpression().outerStride(); }
typedef typename internal::conditional<
internal::is_lvalue<MatrixType>::value,
@@ -131,21 +136,25 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
const Scalar
>::type ScalarWithConstIfNotLvalue;
- EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
- EIGEN_DEVICE_FUNC inline const Scalar* data() const { return derived().nestedExpression().data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar* data() const { return derived().nestedExpression().data(); }
// FIXME: shall we keep the const version of coeffRef?
- EIGEN_DEVICE_FUNC
- inline const Scalar& coeffRef(Index rowId, Index colId) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar& coeffRef(Index rowId, Index colId) const
{
return derived().nestedExpression().coeffRef(colId, rowId);
}
- EIGEN_DEVICE_FUNC
- inline const Scalar& coeffRef(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const Scalar& coeffRef(Index index) const
{
return derived().nestedExpression().coeffRef(index);
}
+ protected:
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TransposeImpl)
};
/** \returns an expression of the transpose of *this.
@@ -168,7 +177,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-EIGEN_DEVICE_FUNC inline Transpose<Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+Transpose<Derived>
DenseBase<Derived>::transpose()
{
return TransposeReturnType(derived());
@@ -180,7 +190,8 @@ DenseBase<Derived>::transpose()
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ConstTransposeReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
return ConstTransposeReturnType(derived());
@@ -228,11 +239,10 @@ struct inplace_transpose_selector;
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true,false> { // square matrix
static void run(MatrixType& m) {
- m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
+ m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose().template triangularView<StrictlyUpper>());
}
};
-// TODO: vectorized path is currently limited to LargestPacketSize x LargestPacketSize cases only.
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x PacketSize
static void run(MatrixType& m) {
@@ -249,16 +259,66 @@ struct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x Packet
}
};
+
+template <typename MatrixType, Index Alignment>
+void BlockedInPlaceTranspose(MatrixType& m) {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename internal::packet_traits<typename MatrixType::Scalar>::type Packet;
+ const Index PacketSize = internal::packet_traits<Scalar>::size;
+ eigen_assert(m.rows() == m.cols());
+ int row_start = 0;
+ for (; row_start + PacketSize <= m.rows(); row_start += PacketSize) {
+ for (int col_start = row_start; col_start + PacketSize <= m.cols(); col_start += PacketSize) {
+ PacketBlock<Packet> A;
+ if (row_start == col_start) {
+ for (Index i=0; i<PacketSize; ++i)
+ A.packet[i] = m.template packetByOuterInner<Alignment>(row_start + i,col_start);
+ internal::ptranspose(A);
+ for (Index i=0; i<PacketSize; ++i)
+ m.template writePacket<Alignment>(m.rowIndexByOuterInner(row_start + i, col_start), m.colIndexByOuterInner(row_start + i,col_start), A.packet[i]);
+ } else {
+ PacketBlock<Packet> B;
+ for (Index i=0; i<PacketSize; ++i) {
+ A.packet[i] = m.template packetByOuterInner<Alignment>(row_start + i,col_start);
+ B.packet[i] = m.template packetByOuterInner<Alignment>(col_start + i, row_start);
+ }
+ internal::ptranspose(A);
+ internal::ptranspose(B);
+ for (Index i=0; i<PacketSize; ++i) {
+ m.template writePacket<Alignment>(m.rowIndexByOuterInner(row_start + i, col_start), m.colIndexByOuterInner(row_start + i,col_start), B.packet[i]);
+ m.template writePacket<Alignment>(m.rowIndexByOuterInner(col_start + i, row_start), m.colIndexByOuterInner(col_start + i,row_start), A.packet[i]);
+ }
+ }
+ }
+ }
+ for (Index row = row_start; row < m.rows(); ++row) {
+ m.matrix().row(row).head(row).swap(
+ m.matrix().col(row).head(row).transpose());
+ }
+}
+
template<typename MatrixType,bool MatchPacketSize>
-struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non square matrix
+struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non square or dynamic matrix
static void run(MatrixType& m) {
- if (m.rows()==m.cols())
- m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
- else
+ typedef typename MatrixType::Scalar Scalar;
+ if (m.rows() == m.cols()) {
+ const Index PacketSize = internal::packet_traits<Scalar>::size;
+ if (!NumTraits<Scalar>::IsComplex && m.rows() >= PacketSize) {
+ if ((m.rows() % PacketSize) == 0)
+ BlockedInPlaceTranspose<MatrixType,internal::evaluator<MatrixType>::Alignment>(m);
+ else
+ BlockedInPlaceTranspose<MatrixType,Unaligned>(m);
+ }
+ else {
+ m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose().template triangularView<StrictlyUpper>());
+ }
+ } else {
m = m.transpose().eval();
+ }
}
};
+
} // end namespace internal
/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose.
@@ -276,7 +336,7 @@ struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non squ
* Notice however that this method is only useful if you want to replace a matrix by its own transpose.
* If you just need the transpose of a matrix, use transpose().
*
- * \note if the matrix is not square, then \c *this must be a resizable matrix.
+ * \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), adjointInPlace() */
@@ -391,7 +451,8 @@ struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
template<typename Dst, typename Src>
void check_for_aliasing(const Dst &dst, const Src &src)
{
- internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
+ if((!Dst::IsVectorAtCompileTime) && dst.rows()>1 && dst.cols()>1)
+ internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
}
} // end namespace internal
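For context (not part of the patch), a sketch of the entry point these selectors serve: transposeInPlace() picks the blocked or triangular-swap path patched above and avoids the aliasing that a plain m = m.transpose() would trigger.

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::MatrixXd m(3, 3);
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;
      // m = m.transpose();  // would alias; use the in-place version instead
      m.transposeInPlace();
      std::cout << m << '\n';
    }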
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Transpositions.h b/examples/ThirdPartyLibs/Eigen/src/Core/Transpositions.h
index 19c17bb4a..38a7b01cb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Transpositions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Transpositions.h
@@ -10,20 +10,22 @@
#ifndef EIGEN_TRANSPOSITIONS_H
#define EIGEN_TRANSPOSITIONS_H
-namespace Eigen {
+namespace Eigen {
template<typename Derived>
class TranspositionsBase
{
typedef internal::traits<Derived> Traits;
-
+
public:
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+ EIGEN_DEVICE_FUNC
Derived& derived() { return *static_cast<Derived*>(this); }
+ EIGEN_DEVICE_FUNC
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** Copies the \a other transpositions into \c *this */
@@ -33,26 +35,19 @@ class TranspositionsBase
indices() = other.indices();
return derived();
}
-
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is a special case of the templated operator=. Its purpose is to
- * prevent a default operator= from hiding the templated operator=.
- */
- Derived& operator=(const TranspositionsBase& other)
- {
- indices() = other.indices();
- return derived();
- }
- #endif
/** \returns the number of transpositions */
+ EIGEN_DEVICE_FUNC
Index size() const { return indices().size(); }
/** \returns the number of rows of the equivalent permutation matrix */
+ EIGEN_DEVICE_FUNC
Index rows() const { return indices().size(); }
/** \returns the number of columns of the equivalent permutation matrix */
+ EIGEN_DEVICE_FUNC
Index cols() const { return indices().size(); }
/** Direct access to the underlying index vector */
+ EIGEN_DEVICE_FUNC
inline const StorageIndex& coeff(Index i) const { return indices().coeff(i); }
/** Direct access to the underlying index vector */
inline StorageIndex& coeffRef(Index i) { return indices().coeffRef(i); }
@@ -66,8 +61,10 @@ class TranspositionsBase
inline StorageIndex& operator[](Index i) { return indices()(i); }
/** const version of indices(). */
+ EIGEN_DEVICE_FUNC
const IndicesType& indices() const { return derived().indices(); }
/** \returns a reference to the stored array representing the transpositions. */
+ EIGEN_DEVICE_FUNC
IndicesType& indices() { return derived().indices(); }
/** Resizes to given size. */
@@ -84,7 +81,7 @@ class TranspositionsBase
}
// FIXME: do we want such methods ?
- // might be usefull when the target matrix expression is complex, e.g.:
+ // might be useful when the target matrix expression is complex, e.g.:
// object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
/*
template<typename MatrixType>
@@ -171,12 +168,6 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
inline Transpositions(const TranspositionsBase<OtherDerived>& other)
: m_indices(other.indices()) {}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** Standard copy constructor. Defined only to prevent a default copy constructor
- * from hiding the other templated constructor */
- inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
- #endif
-
/** Generic constructor from expression of the transposition indices. */
template<typename Other>
explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
@@ -189,25 +180,16 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
return Base::operator=(other);
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is a special case of the templated operator=. Its purpose is to
- * prevent a default operator= from hiding the templated operator=.
- */
- Transpositions& operator=(const Transpositions& other)
- {
- m_indices = other.m_indices;
- return *this;
- }
- #endif
-
/** Constructs an uninitialized permutation matrix of given size.
*/
inline Transpositions(Index size) : m_indices(size)
{}
/** const version of indices(). */
+ EIGEN_DEVICE_FUNC
const IndicesType& indices() const { return m_indices; }
/** \returns a reference to the stored array representing the transpositions. */
+ EIGEN_DEVICE_FUNC
IndicesType& indices() { return m_indices; }
protected:
@@ -265,9 +247,11 @@ class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,P
#endif
/** const version of indices(). */
+ EIGEN_DEVICE_FUNC
const IndicesType& indices() const { return m_indices; }
-
+
/** \returns a reference to the stored array representing the transpositions. */
+ EIGEN_DEVICE_FUNC
IndicesType& indices() { return m_indices; }
protected:
@@ -306,21 +290,12 @@ class TranspositionsWrapper
return Base::operator=(other);
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is a special case of the templated operator=. Its purpose is to
- * prevent a default operator= from hiding the templated operator=.
- */
- TranspositionsWrapper& operator=(const TranspositionsWrapper& other)
- {
- m_indices = other.m_indices;
- return *this;
- }
- #endif
-
/** const version of indices(). */
+ EIGEN_DEVICE_FUNC
const IndicesType& indices() const { return m_indices; }
/** \returns a reference to the stored array representing the transpositions. */
+ EIGEN_DEVICE_FUNC
IndicesType& indices() { return m_indices; }
protected:
@@ -374,9 +349,12 @@ class Transpose<TranspositionsBase<TranspositionsDerived> >
explicit Transpose(const TranspositionType& t) : m_transpositions(t) {}
- Index size() const { return m_transpositions.size(); }
- Index rows() const { return m_transpositions.size(); }
- Index cols() const { return m_transpositions.size(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index size() const EIGEN_NOEXCEPT { return m_transpositions.size(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_transpositions.size(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_transpositions.size(); }
/** \returns the \a matrix with the inverse transpositions applied to the columns.
*/
@@ -384,7 +362,7 @@ class Transpose<TranspositionsBase<TranspositionsDerived> >
const Product<OtherDerived, Transpose, AliasFreeProduct>
operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trt)
{
- return Product<OtherDerived, Transpose, AliasFreeProduct>(matrix.derived(), trt.derived());
+ return Product<OtherDerived, Transpose, AliasFreeProduct>(matrix.derived(), trt);
}
/** \returns the \a matrix with the inverse transpositions applied to the rows.
@@ -395,7 +373,8 @@ class Transpose<TranspositionsBase<TranspositionsDerived> >
{
return Product<Transpose, OtherDerived, AliasFreeProduct>(*this, matrix.derived());
}
-
+
+ EIGEN_DEVICE_FUNC
const TranspositionType& nestedExpression() const { return m_transpositions; }
protected:
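For context (not part of the patch), a minimal sketch of the Transpositions API annotated above. Entry tr[j] = k means "swap j and k", and the transpositions are applied in sequence when multiplied against a matrix.

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::Transpositions<Eigen::Dynamic> tr(3);
      tr[0] = 1;  // swap rows 0 and 1
      tr[1] = 1;  // no-op
      tr[2] = 2;  // no-op
      Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
      Eigen::Matrix3d p = tr * m;  // rows 0 and 1 of the identity exchanged
      std::cout << p << '\n';
    }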
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/TriangularMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/TriangularMatrix.h
index ed80da36a..fdb8bc15a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/TriangularMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/TriangularMatrix.h
@@ -11,12 +11,12 @@
#ifndef EIGEN_TRIANGULARMATRIX_H
#define EIGEN_TRIANGULARMATRIX_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
-
+
template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;
-
+
}
/** \class TriangularBase
@@ -34,16 +34,16 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
-
+
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
/**< This is equal to the number of coefficients, i.e. the number of
* rows times the number of columns, or to \a Dynamic if this is not
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
-
+
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
internal::traits<Derived>::MaxColsAtCompileTime>::ret)
-
+
};
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
@@ -53,18 +53,19 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
typedef Derived const& Nested;
EIGEN_DEVICE_FUNC
- inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
+ inline TriangularBase() { eigen_assert(!((int(Mode) & int(UnitDiag)) && (int(Mode) & int(ZeroDiag)))); }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return derived().rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return derived().cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index outerStride() const EIGEN_NOEXCEPT { return derived().outerStride(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index innerStride() const EIGEN_NOEXCEPT { return derived().innerStride(); }
- EIGEN_DEVICE_FUNC
- inline Index rows() const { return derived().rows(); }
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return derived().cols(); }
- EIGEN_DEVICE_FUNC
- inline Index outerStride() const { return derived().outerStride(); }
- EIGEN_DEVICE_FUNC
- inline Index innerStride() const { return derived().innerStride(); }
-
// dummy resize function
+ EIGEN_DEVICE_FUNC
void resize(Index rows, Index cols)
{
EIGEN_UNUSED_VARIABLE(rows);
@@ -155,7 +156,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
* \param MatrixType the type of the object in which we are taking the triangular part
* \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
* #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
- * This is in fact a bit field; it must have either #Upper or #Lower,
+ * This is in fact a bit field; it must have either #Upper or #Lower,
* and additionally it may have #UnitDiag or #ZeroDiag or neither.
*
* This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
@@ -197,7 +198,8 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
-
+ typedef TriangularView<typename internal::add_const<MatrixType>::type, _Mode> ConstTriangularView;
+
public:
typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
@@ -216,17 +218,15 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
EIGEN_DEVICE_FUNC
explicit inline TriangularView(MatrixType& matrix) : m_matrix(matrix)
{}
-
- using Base::operator=;
- TriangularView& operator=(const TriangularView &other)
- { return Base::operator=(other); }
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TriangularView)
/** \copydoc EigenBase::rows() */
- EIGEN_DEVICE_FUNC
- inline Index rows() const { return m_matrix.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
/** \copydoc EigenBase::cols() */
- EIGEN_DEVICE_FUNC
- inline Index cols() const { return m_matrix.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
/** \returns a const reference to the nested expression */
EIGEN_DEVICE_FUNC
@@ -235,13 +235,25 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
/** \returns a reference to the nested expression */
EIGEN_DEVICE_FUNC
NestedExpression& nestedExpression() { return m_matrix; }
-
+
typedef TriangularView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;
/** \sa MatrixBase::conjugate() const */
EIGEN_DEVICE_FUNC
inline const ConjugateReturnType conjugate() const
{ return ConjugateReturnType(m_matrix.conjugate()); }
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
+ * returns \c *this otherwise.
+ */
+ template<bool Cond>
+ EIGEN_DEVICE_FUNC
+ inline typename internal::conditional<Cond,ConjugateReturnType,ConstTriangularView>::type
+ conjugateIf() const
+ {
+ typedef typename internal::conditional<Cond,ConjugateReturnType,ConstTriangularView>::type ReturnType;
+ return ReturnType(m_matrix.template conjugateIf<Cond>());
+ }
+
typedef TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;
/** \sa MatrixBase::adjoint() const */
EIGEN_DEVICE_FUNC
@@ -257,7 +269,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
typename MatrixType::TransposeReturnType tmp(m_matrix);
return TransposeReturnType(tmp);
}
-
+
typedef TriangularView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType;
/** \sa MatrixBase::transpose() const */
EIGEN_DEVICE_FUNC
@@ -268,10 +280,10 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
template<typename Other>
EIGEN_DEVICE_FUNC
- inline const Solve<TriangularView, Other>
+ inline const Solve<TriangularView, Other>
solve(const MatrixBase<Other>& other) const
{ return Solve<TriangularView, Other>(*this, other.derived()); }
-
+
// workaround MSVC ICE
#if EIGEN_COMP_MSVC
template<int Side, typename Other>
@@ -315,7 +327,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
else
return m_matrix.diagonal().prod();
}
-
+
protected:
MatrixTypeNested m_matrix;
@@ -377,7 +389,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>());
return derived();
}
-
+
/** \sa MatrixBase::operator*=() */
EIGEN_DEVICE_FUNC
TriangularViewType& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; }
@@ -435,14 +447,14 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
TriangularViewType& operator=(const TriangularViewImpl& other)
{ return *this = other.derived().nestedExpression(); }
- /** \deprecated */
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ /** \deprecated */
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC
void lazyAssign(const TriangularBase<OtherDerived>& other);
- /** \deprecated */
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ /** \deprecated */
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC
void lazyAssign(const MatrixBase<OtherDerived>& other);
#endif
@@ -470,7 +482,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
* \a Side==OnTheRight.
*
- * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+ * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
* diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this
@@ -496,7 +508,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
* This function will const_cast it, so constness isn't honored here.
*
- * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+ * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* See TriangularView::solve() for the details.
*/
@@ -522,10 +534,10 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
}
- /** \deprecated
- * Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */
+ /** Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ /** \deprecated */
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC
void swap(MatrixBase<OtherDerived> const & other)
{
EIGEN_STATIC_ASSERT_LVALUE(OtherDerived);
@@ -543,6 +555,10 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
template<typename ProductType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TriangularViewType& _assignProduct(const ProductType& prod, const Scalar& alpha, bool beta);
+ protected:
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(TriangularViewImpl)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TriangularViewImpl)
+
};
/***************************************************************************
@@ -699,7 +715,7 @@ bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const
namespace internal {
-
+
// TODO currently a triangular expression has the form TriangularView<.,.>
// in the future triangular-ness should be defined by the expression traits
// such that Transpose<TriangularView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
@@ -716,6 +732,7 @@ struct unary_evaluator<TriangularView<MatrixType,Mode>, IndexBased>
{
typedef TriangularView<MatrixType,Mode> XprType;
typedef evaluator<typename internal::remove_all<MatrixType>::type> Base;
+ EIGEN_DEVICE_FUNC
unary_evaluator(const XprType &xpr) : Base(xpr.nestedExpression()) {}
};
@@ -727,7 +744,7 @@ struct Dense2Triangular {};
template<typename Kernel, unsigned int Mode, int UnrollCount, bool ClearOpposite> struct triangular_assignment_loop;
-
+
/** \internal Specialization of the dense assignment kernel for triangular matrices.
* The main difference is that the triangular, diagonal, and opposite parts are processed through three different functions.
* \tparam UpLo must be either Lower or Upper
@@ -744,17 +761,17 @@ protected:
using Base::m_src;
using Base::m_functor;
public:
-
+
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::AssignmentTraits AssignmentTraits;
-
-
+
+
EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
: Base(dst, src, func, dstExpr)
{}
-
+
#ifdef EIGEN_INTERNAL_DEBUGGING
EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)
{
@@ -764,16 +781,16 @@ public:
#else
using Base::assignCoeff;
#endif
-
+
EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)
{
if(Mode==UnitDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(1));
else if(Mode==ZeroDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(0));
else if(Mode==0) Base::assignCoeff(id,id);
}
-
+
EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index row, Index col)
- {
+ {
eigen_internal_assert(row!=col);
if(SetOpposite)
m_functor.assignCoeff(m_dst.coeffRef(row,col), Scalar(0));
@@ -794,17 +811,17 @@ void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src, con
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
DstEvaluatorType dstEvaluator(dst);
-
+
typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite,
DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
-
+
enum {
unroll = DstXprType::SizeAtCompileTime != Dynamic
&& SrcEvaluatorType::CoeffReadCost < HugeCost
- && DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT
+ && DstXprType::SizeAtCompileTime * (int(DstEvaluatorType::CoeffReadCost) + int(SrcEvaluatorType::CoeffReadCost)) / 2 <= EIGEN_UNROLLING_LIMIT
};
-
+
triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
}
@@ -826,8 +843,8 @@ struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Triangular>
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
eigen_assert(int(DstXprType::Mode) == int(SrcXprType::Mode));
-
- call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
+
+ call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
}
};
@@ -836,7 +853,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Dense>
{
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
- call_triangular_assignment_loop<SrcXprType::Mode, (SrcXprType::Mode&SelfAdjoint)==0>(dst, src, func);
+ call_triangular_assignment_loop<SrcXprType::Mode, (int(SrcXprType::Mode) & int(SelfAdjoint)) == 0>(dst, src, func);
}
};
@@ -845,7 +862,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Dense2Triangular>
{
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
- call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
+ call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
}
};
@@ -856,19 +873,19 @@ struct triangular_assignment_loop
// FIXME: this is not very clean, perhaps this information should be provided by the kernel?
typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
typedef typename DstEvaluatorType::XprType DstXprType;
-
+
enum {
col = (UnrollCount-1) / DstXprType::RowsAtCompileTime,
row = (UnrollCount-1) % DstXprType::RowsAtCompileTime
};
-
+
typedef typename Kernel::Scalar Scalar;
EIGEN_DEVICE_FUNC
static inline void run(Kernel &kernel)
{
triangular_assignment_loop<Kernel, Mode, UnrollCount-1, SetOpposite>::run(kernel);
-
+
if(row==col)
kernel.assignDiagonalCoeff(row);
else if( ((Mode&Lower) && row>col) || ((Mode&Upper) && row<col) )
@@ -911,10 +928,10 @@ struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
}
else
i = maxi;
-
+
if(i<kernel.rows()) // then i==j
kernel.assignDiagonalCoeff(i++);
-
+
if (((Mode&Upper) && SetOpposite) || (Mode&Lower))
{
for(; i < kernel.rows(); ++i)
@@ -934,11 +951,11 @@ template<typename DenseDerived>
EIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
{
other.derived().resize(this->rows(), this->cols());
- internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
+ internal::call_triangular_assignment_loop<Derived::Mode, (int(Derived::Mode) & int(SelfAdjoint)) == 0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
}
namespace internal {
-
+
// Triangular = Product
template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
@@ -951,7 +968,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
- dst._assignProduct(src, 1, 0);
+ dst._assignProduct(src, Scalar(1), false);
}
};
@@ -962,7 +979,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_ass
typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)
{
- dst._assignProduct(src, 1, 1);
+ dst._assignProduct(src, Scalar(1), true);
}
};
@@ -973,7 +990,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_ass
typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)
{
- dst._assignProduct(src, -1, 1);
+ dst._assignProduct(src, Scalar(-1), true);
}
};
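For context (not part of the patch), a sketch exercising TriangularView::solve(), one of the paths whose declarations are updated above; only the lower triangle of A is read.

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::Matrix3d A;
      A << 2, 9, 9,
           1, 3, 9,
           4, 5, 6;  // the 9s above the diagonal are never read
      Eigen::Vector3d b(2, 4, 15);
      // Forward substitution; expected solution: 1 1 1
      Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);
      std::cout << x.transpose() << '\n';
    }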
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/VectorBlock.h b/examples/ThirdPartyLibs/Eigen/src/Core/VectorBlock.h
index d72fbf7e9..71c5b95ee 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/VectorBlock.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/VectorBlock.h
@@ -35,7 +35,7 @@ struct traits<VectorBlock<VectorType, Size> >
* It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
* most of the time this is the only way it is used.
*
- * However, if you want to directly maniputate sub-vector expressions,
+ * However, if you want to directly manipulate sub-vector expressions,
* for instance if you want to write a function returning such an expression, you
* will need to use this class.
*
@@ -71,8 +71,8 @@ template<typename VectorType, int Size> class VectorBlock
/** Dynamic-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline VectorBlock(VectorType& vector, Index start, Index size)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ VectorBlock(VectorType& vector, Index start, Index size)
: Base(vector,
IsColVector ? start : 0, IsColVector ? 0 : start,
IsColVector ? size : 1, IsColVector ? 1 : size)
@@ -82,8 +82,8 @@ template<typename VectorType, int Size> class VectorBlock
/** Fixed-size constructor
*/
- EIGEN_DEVICE_FUNC
- inline VectorBlock(VectorType& vector, Index start)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ VectorBlock(VectorType& vector, Index start)
: Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
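For context (not part of the patch), a minimal sketch of the two VectorBlock constructors touched above, as produced by segment():

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::VectorXd v(6);
      v << 0, 1, 2, 3, 4, 5;
      v.segment(1, 3) *= 10;               // dynamic-size constructor
      v.segment<2>(4).setZero();           // fixed-size constructor
      std::cout << v.transpose() << '\n';  // prints: 0 10 20 30 0 0
    }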
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/VectorwiseOp.h b/examples/ThirdPartyLibs/Eigen/src/Core/VectorwiseOp.h
index 893bc796f..870f4f1e4 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/VectorwiseOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/VectorwiseOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -65,10 +65,10 @@ class PartialReduxExpr : public internal::dense_xpr_base< PartialReduxExpr<Matri
explicit PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
: m_matrix(mat), m_functor(func) {}
- EIGEN_DEVICE_FUNC
- Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
- EIGEN_DEVICE_FUNC
- Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return (Direction==Vertical ? 1 : m_matrix.rows()); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
EIGEN_DEVICE_FUNC
typename MatrixType::Nested nestedExpression() const { return m_matrix; }
@@ -81,39 +81,46 @@ class PartialReduxExpr : public internal::dense_xpr_base< PartialReduxExpr<Matri
const MemberOp m_functor;
};
-#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
- template <typename ResultType> \
- struct member_##MEMBER { \
- EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
- typedef ResultType result_type; \
- template<typename Scalar, int Size> struct Cost \
- { enum { value = COST }; }; \
- template<typename XprType> \
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
- ResultType operator()(const XprType& mat) const \
- { return mat.MEMBER(); } \
+template<typename A,typename B> struct partial_redux_dummy_func;
+
+#define EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(MEMBER,COST,VECTORIZABLE,BINARYOP) \
+ template <typename ResultType,typename Scalar> \
+ struct member_##MEMBER { \
+ EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
+ typedef ResultType result_type; \
+ typedef BINARYOP<Scalar,Scalar> BinaryOp; \
+ template<int Size> struct Cost { enum { value = COST }; }; \
+ enum { Vectorizable = VECTORIZABLE }; \
+ template<typename XprType> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ ResultType operator()(const XprType& mat) const \
+ { return mat.MEMBER(); } \
+ BinaryOp binaryFunc() const { return BinaryOp(); } \
}
+#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
+ EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(MEMBER,COST,0,partial_redux_dummy_func)
+
namespace internal {
-EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits<scalar_hypot_op<Scalar> >::Cost );
-EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits<Scalar>::AddCost + NumTraits<Scalar>::MulCost);
-EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);
EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost);
-template <int p, typename ResultType>
+EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost, 1, internal::scalar_sum_op);
+EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost, 1, internal::scalar_min_op);
+EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost, 1, internal::scalar_max_op);
+EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost, 1, internal::scalar_product_op);
+
+template <int p, typename ResultType,typename Scalar>
struct member_lpnorm {
typedef ResultType result_type;
- template<typename Scalar, int Size> struct Cost
+ enum { Vectorizable = 0 };
+ template<int Size> struct Cost
{ enum { value = (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost }; };
EIGEN_DEVICE_FUNC member_lpnorm() {}
template<typename XprType>
@@ -121,17 +128,20 @@ struct member_lpnorm {
{ return mat.template lpNorm<p>(); }
};
-template <typename BinaryOp, typename Scalar>
+template <typename BinaryOpT, typename Scalar>
struct member_redux {
+ typedef BinaryOpT BinaryOp;
typedef typename result_of<
BinaryOp(const Scalar&,const Scalar&)
>::type result_type;
- template<typename _Scalar, int Size> struct Cost
- { enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; };
+
+ enum { Vectorizable = functor_traits<BinaryOp>::PacketAccess };
+ template<int Size> struct Cost { enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; };
EIGEN_DEVICE_FUNC explicit member_redux(const BinaryOp func) : m_functor(func) {}
template<typename Derived>
EIGEN_DEVICE_FUNC inline result_type operator()(const DenseBase<Derived>& mat) const
{ return mat.redux(m_functor); }
+ const BinaryOp& binaryFunc() const { return m_functor; }
const BinaryOp m_functor;
};
}
@@ -139,18 +149,38 @@ struct member_redux {
/** \class VectorwiseOp
* \ingroup Core_Module
*
- * \brief Pseudo expression providing partial reduction operations
+ * \brief Pseudo expression providing broadcasting and partial reduction operations
*
* \tparam ExpressionType the type of the object on which to do partial reductions
- * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)
+ * \tparam Direction indicates whether to operate on columns (#Vertical) or rows (#Horizontal)
*
- * This class represents a pseudo expression with partial reduction features.
+ * This class represents a pseudo expression with broadcasting and partial reduction features.
* It is the return type of DenseBase::colwise() and DenseBase::rowwise()
- * and most of the time this is the only way it is used.
+ * and most of the time this is the only way it is explicitly used.
+ *
+ * To understand the logic of rowwise/colwise expressions, let's consider the generic case `A.colwise().foo()`,
+ * where `foo` is any method of `VectorwiseOp`. This expression is equivalent to applying `foo()` to each
+ * column of `A` and then re-assembling the outputs into a matrix expression:
+ * \code [A.col(0).foo(), A.col(1).foo(), ..., A.col(A.cols()-1).foo()] \endcode
*
* Example: \include MatrixBase_colwise.cpp
* Output: \verbinclude MatrixBase_colwise.out
*
+ * The begin() and end() methods are obviously exceptions to the previous rule as they
+ * return STL-compatible begin/end iterators to the rows or columns of the nested expression.
+ * Typical use cases include range-based for loops and calls to STL algorithms:
+ *
+ * Example: \include MatrixBase_colwise_iterator_cxx11.cpp
+ * Output: \verbinclude MatrixBase_colwise_iterator_cxx11.out
+ *
+ * For a partial reduction on an empty input, some rules apply.
+ * For the sake of clarity, let's consider a vertical reduction:
+ * - If the number of columns is zero, then a 1x0 row-major vector expression is returned.
+ * - Otherwise, if the number of rows is zero, then
+ * - a row vector of zeros is returned for sum-like reductions (sum, squaredNorm, norm, etc.)
+ * - a row vector of ones is returned for a product reduction (e.g., <code>MatrixXd(n,0).colwise().prod()</code>)
+ * - an assert is triggered for all other reductions (minCoeff,maxCoeff,redux(bin_op))
+ *
* \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr
*/
template<typename ExpressionType, int Direction> class VectorwiseOp
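
A small example consistent with the empty-input rules listed above (a sketch, assuming this Eigen revision):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd a(3, 0), b(0, 3);
      std::cout << a.colwise().sum().cols() << "\n"; // 0 -- a 1x0 expression
      std::cout << b.colwise().sum()        << "\n"; // 0 0 0 -- empty sums are zero
      std::cout << b.colwise().prod()       << "\n"; // 1 1 1 -- empty products are one
      // b.colwise().minCoeff() would trigger the assertion: no identity element.
      return 0;
    }
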
@@ -163,11 +193,11 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
typedef typename internal::ref_selector<ExpressionType>::non_const_type ExpressionTypeNested;
typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned;
- template<template<typename _Scalar> class Functor,
- typename Scalar_=Scalar> struct ReturnType
+ template<template<typename OutScalar,typename InputScalar> class Functor,
+ typename ReturnScalar=Scalar> struct ReturnType
{
typedef PartialReduxExpr<ExpressionType,
- Functor<Scalar_>,
+ Functor<ReturnScalar,Scalar>,
Direction
> Type;
};
@@ -187,23 +217,6 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
protected:
- typedef typename internal::conditional<isVertical,
- typename ExpressionType::ColXpr,
- typename ExpressionType::RowXpr>::type SubVector;
- /** \internal
- * \returns the i-th subvector according to the \c Direction */
- EIGEN_DEVICE_FUNC
- SubVector subVector(Index i)
- {
- return SubVector(m_matrix.derived(),i);
- }
-
- /** \internal
- * \returns the number of subvectors in the direction \c Direction */
- EIGEN_DEVICE_FUNC
- Index subVectors() const
- { return isVertical?m_matrix.cols():m_matrix.rows(); }
-
template<typename OtherDerived> struct ExtendedType {
typedef Replicate<OtherDerived,
isVertical ? 1 : ExpressionType::RowsAtCompileTime,
@@ -258,42 +271,101 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_DEVICE_FUNC
inline const ExpressionType& _expression() const { return m_matrix; }
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** STL-like <a href="https://en.cppreference.com/w/cpp/named_req/RandomAccessIterator">RandomAccessIterator</a>
+ * iterator type over the columns or rows as returned by the begin() and end() methods.
+ */
+ random_access_iterator_type iterator;
+ /** This is the const version of iterator (aka read-only) */
+ random_access_iterator_type const_iterator;
+ #else
+ typedef internal::subvector_stl_iterator<ExpressionType, DirectionType(Direction)> iterator;
+ typedef internal::subvector_stl_iterator<const ExpressionType, DirectionType(Direction)> const_iterator;
+ typedef internal::subvector_stl_reverse_iterator<ExpressionType, DirectionType(Direction)> reverse_iterator;
+ typedef internal::subvector_stl_reverse_iterator<const ExpressionType, DirectionType(Direction)> const_reverse_iterator;
+ #endif
+
+ /** returns an iterator to the first row (rowwise) or column (colwise) of the nested expression.
+ * \sa end(), cbegin()
+ */
+ iterator begin() { return iterator (m_matrix, 0); }
+ /** const version of begin() */
+ const_iterator begin() const { return const_iterator(m_matrix, 0); }
+ /** const version of begin() */
+ const_iterator cbegin() const { return const_iterator(m_matrix, 0); }
+
+ /** returns a reverse iterator to the last row (rowwise) or column (colwise) of the nested expression.
+ * \sa rend(), crbegin()
+ */
+ reverse_iterator rbegin() { return reverse_iterator (m_matrix, m_matrix.template subVectors<DirectionType(Direction)>()-1); }
+ /** const version of rbegin() */
+ const_reverse_iterator rbegin() const { return const_reverse_iterator (m_matrix, m_matrix.template subVectors<DirectionType(Direction)>()-1); }
+ /** const version of rbegin() */
+ const_reverse_iterator crbegin() const { return const_reverse_iterator (m_matrix, m_matrix.template subVectors<DirectionType(Direction)>()-1); }
+
+ /** returns an iterator to the row (resp. column) following the last row (resp. column) of the nested expression
+ * \sa begin(), cend()
+ */
+ iterator end() { return iterator (m_matrix, m_matrix.template subVectors<DirectionType(Direction)>()); }
+ /** const version of end() */
+ const_iterator end() const { return const_iterator(m_matrix, m_matrix.template subVectors<DirectionType(Direction)>()); }
+ /** const version of end() */
+ const_iterator cend() const { return const_iterator(m_matrix, m_matrix.template subVectors<DirectionType(Direction)>()); }
+
+ /** returns a reverse iterator to the row (resp. column) before the first row (resp. column) of the nested expression
+ * \sa rbegin(), crend()
+ */
+ reverse_iterator rend() { return reverse_iterator (m_matrix, -1); }
+ /** const version of rend() */
+ const_reverse_iterator rend() const { return const_reverse_iterator (m_matrix, -1); }
+ /** const version of rend() */
+ const_reverse_iterator crend() const { return const_reverse_iterator (m_matrix, -1); }
+
/** \returns a row or column vector expression of \c *this reduxed by \a func
*
* The template parameter \a BinaryOp is the type of the functor
* of the custom redux operator. Note that func must be an associative operator.
*
+ * \warning the size along the reduction direction must be strictly positive,
+ * otherwise an assertion is triggered.
+ *
* \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise()
*/
template<typename BinaryOp>
EIGEN_DEVICE_FUNC
const typename ReduxReturnType<BinaryOp>::Type
redux(const BinaryOp& func = BinaryOp()) const
- { return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func)); }
+ {
+ eigen_assert(redux_length()>0 && "you are using an empty matrix");
+ return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func));
+ }
typedef typename ReturnType<internal::member_minCoeff>::Type MinCoeffReturnType;
typedef typename ReturnType<internal::member_maxCoeff>::Type MaxCoeffReturnType;
- typedef typename ReturnType<internal::member_squaredNorm,RealScalar>::Type SquaredNormReturnType;
- typedef typename ReturnType<internal::member_norm,RealScalar>::Type NormReturnType;
+ typedef PartialReduxExpr<const CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const ExpressionTypeNestedCleaned>,internal::member_sum<RealScalar,RealScalar>,Direction> SquaredNormReturnType;
+ typedef CwiseUnaryOp<internal::scalar_sqrt_op<RealScalar>, const SquaredNormReturnType> NormReturnType;
typedef typename ReturnType<internal::member_blueNorm,RealScalar>::Type BlueNormReturnType;
typedef typename ReturnType<internal::member_stableNorm,RealScalar>::Type StableNormReturnType;
typedef typename ReturnType<internal::member_hypotNorm,RealScalar>::Type HypotNormReturnType;
typedef typename ReturnType<internal::member_sum>::Type SumReturnType;
- typedef typename ReturnType<internal::member_mean>::Type MeanReturnType;
+ typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(SumReturnType,Scalar,quotient) MeanReturnType;
typedef typename ReturnType<internal::member_all>::Type AllReturnType;
typedef typename ReturnType<internal::member_any>::Type AnyReturnType;
- typedef PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> CountReturnType;
+ typedef PartialReduxExpr<ExpressionType, internal::member_count<Index,Scalar>, Direction> CountReturnType;
typedef typename ReturnType<internal::member_prod>::Type ProdReturnType;
typedef Reverse<const ExpressionType, Direction> ConstReverseReturnType;
typedef Reverse<ExpressionType, Direction> ReverseReturnType;
template<int p> struct LpNormReturnType {
- typedef PartialReduxExpr<ExpressionType, internal::member_lpnorm<p,RealScalar>,Direction> Type;
+ typedef PartialReduxExpr<ExpressionType, internal::member_lpnorm<p,RealScalar,Scalar>,Direction> Type;
};
/** \returns a row (or column) vector expression of the smallest coefficient
* of each column (or row) of the referenced expression.
*
+ * \warning the size along the reduction direction must be strictly positive,
+ * otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_minCoeff.cpp
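
A sketch of the iterator API introduced in this hunk, combining range-based for loops with STL algorithms (requires C++11 and these begin()/end() overloads):

    #include <Eigen/Dense>
    #include <algorithm>

    int main() {
      Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 6);
      // colwise().begin()/end() make each column visitable in a range-for.
      for (auto col : m.colwise())
        col.normalize();
      // The iterators are random-access, so STL algorithms apply directly;
      // here every row is sorted in place.
      for (auto row : m.rowwise())
        std::sort(row.begin(), row.end());
      return 0;
    }
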
@@ -302,11 +374,17 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::minCoeff() */
EIGEN_DEVICE_FUNC
const MinCoeffReturnType minCoeff() const
- { return MinCoeffReturnType(_expression()); }
+ {
+ eigen_assert(redux_length()>0 && "you are using an empty matrix");
+ return MinCoeffReturnType(_expression());
+ }
/** \returns a row (or column) vector expression of the largest coefficient
* of each column (or row) of the referenced expression.
*
+ * \warning the size along the reduction direction must be strictly positive,
+ * otherwise an assertion is triggered.
+ *
* \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_maxCoeff.cpp
@@ -315,7 +393,10 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::maxCoeff() */
EIGEN_DEVICE_FUNC
const MaxCoeffReturnType maxCoeff() const
- { return MaxCoeffReturnType(_expression()); }
+ {
+ eigen_assert(redux_length()>0 && "you are using an empty matrix");
+ return MaxCoeffReturnType(_expression());
+ }
/** \returns a row (or column) vector expression of the squared norm
* of each column (or row) of the referenced expression.
@@ -327,7 +408,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::squaredNorm() */
EIGEN_DEVICE_FUNC
const SquaredNormReturnType squaredNorm() const
- { return SquaredNormReturnType(_expression()); }
+ { return SquaredNormReturnType(m_matrix.cwiseAbs2()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression.
@@ -339,7 +420,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::norm() */
EIGEN_DEVICE_FUNC
const NormReturnType norm() const
- { return NormReturnType(_expression()); }
+ { return NormReturnType(squaredNorm()); }
/** \returns a row (or column) vector expression of the norm
* of each column (or row) of the referenced expression.
@@ -404,7 +485,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
* \sa DenseBase::mean() */
EIGEN_DEVICE_FUNC
const MeanReturnType mean() const
- { return MeanReturnType(_expression()); }
+ { return sum() / Scalar(Direction==Vertical?m_matrix.rows():m_matrix.cols()); }
/** \returns a row (or column) vector expression representing
* whether \b all coefficients of each respective column (or row) are \c true.
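
Note that mean() is no longer backed by a dedicated member functor: as the new body above shows, it is literally the sum expression divided by the length along the reduction direction, so the two spellings below build equivalent expressions (sketch):

    Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 4);
    Eigen::RowVectorXd m1 = A.colwise().mean();
    Eigen::RowVectorXd m2 = A.colwise().sum() / double(A.rows());
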
@@ -500,7 +581,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
//eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME
- return const_cast<ExpressionType&>(m_matrix = extendedTo(other.derived()));
+ return m_matrix = extendedTo(other.derived());
}
/** Adds the vector \a other to each subvector of \c *this */
@@ -510,7 +591,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
- return const_cast<ExpressionType&>(m_matrix += extendedTo(other.derived()));
+ return m_matrix += extendedTo(other.derived());
}
/** Subtracts the vector \a other from each subvector of \c *this */
@@ -520,7 +601,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
- return const_cast<ExpressionType&>(m_matrix -= extendedTo(other.derived()));
+ return m_matrix -= extendedTo(other.derived());
}
/** Multiplies each subvector of \c *this by the vector \a other */
@@ -532,7 +613,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
m_matrix *= extendedTo(other.derived());
- return const_cast<ExpressionType&>(m_matrix);
+ return m_matrix;
}
/** Divides each subvector of \c *this by the vector \a other */
@@ -544,7 +625,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
m_matrix /= extendedTo(other.derived());
- return const_cast<ExpressionType&>(m_matrix);
+ return m_matrix;
}
/** Returns the expression of the sum of the vector \a other to each subvector of \c *this */
@@ -609,7 +690,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_DEVICE_FUNC
CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
const ExpressionTypeNestedCleaned,
- const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type>
+ const typename OppositeExtendedType<NormReturnType>::Type>
normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); }
@@ -658,7 +739,15 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_DEVICE_FUNC
const HNormalizedReturnType hnormalized() const;
+# ifdef EIGEN_VECTORWISEOP_PLUGIN
+# include EIGEN_VECTORWISEOP_PLUGIN
+# endif
+
protected:
+ Index redux_length() const
+ {
+ return Direction==Vertical ? m_matrix.rows() : m_matrix.cols();
+ }
ExpressionTypeNested m_matrix;
};
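
The EIGEN_VECTORWISEOP_PLUGIN hook added above follows Eigen's usual plugin convention: if the macro names a header before Eigen is included, that file is pasted into the class body, letting users inject extra members. A hypothetical sketch (file name and member are made up):

    // my_vectorwiseop_plugin.h -- injected verbatim into VectorwiseOp:
    //   Scalar totalSum() const { return m_matrix.sum(); }

    #define EIGEN_VECTORWISEOP_PLUGIN "my_vectorwiseop_plugin.h"
    #include <Eigen/Dense>
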
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/Visitor.h b/examples/ThirdPartyLibs/Eigen/src/Core/Visitor.h
index 54c1883d9..00bcca877 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/Visitor.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/Visitor.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_VISITOR_H
#define EIGEN_VISITOR_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -40,6 +40,14 @@ struct visitor_impl<Visitor, Derived, 1>
}
};
+// This specialization enables visitors on empty matrices at compile-time
+template<typename Visitor, typename Derived>
+struct visitor_impl<Visitor, Derived, 0> {
+ EIGEN_DEVICE_FUNC
+ static inline void run(const Derived &/*mat*/, Visitor& /*visitor*/)
+ {}
+};
+
template<typename Visitor, typename Derived>
struct visitor_impl<Visitor, Derived, Dynamic>
{
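
Combined with the early return added to DenseBase::visit() further below, this zero-size specialization means a visitor is simply left untouched on an empty matrix. A minimal visitor following the init()/operator() protocol that visit() expects (sketch):

    #include <Eigen/Dense>

    // Counts strictly positive coefficients of a double matrix.
    struct CountPositive {
      Eigen::Index count = 0;
      void init(const double& v, Eigen::Index, Eigen::Index) { count = (v > 0); }
      void operator()(const double& v, Eigen::Index, Eigen::Index) { count += (v > 0); }
    };

    int main() {
      CountPositive visitor;
      Eigen::MatrixXd(0, 4).visit(visitor);  // empty: visit() returns early
      return int(visitor.count);             // still 0, visitor untouched
    }
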
@@ -62,22 +70,22 @@ class visitor_evaluator
public:
EIGEN_DEVICE_FUNC
explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
-
+
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
-
+
enum {
RowsAtCompileTime = XprType::RowsAtCompileTime,
CoeffReadCost = internal::evaluator<XprType>::CoeffReadCost
};
-
- EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
- EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_xpr.size(); }
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const
{ return m_evaluator.coeff(row, col); }
-
+
protected:
internal::evaluator<XprType> m_evaluator;
const XprType &m_xpr;
@@ -99,6 +107,8 @@ protected:
* \note compared to one or two \em for \em loops, visitors offer automatic
 * unrolling for small fixed-size matrices.
*
+ * \note if the matrix is empty, then the visitor is left unchanged.
+ *
* \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
*/
template<typename Derived>
@@ -106,12 +116,15 @@ template<typename Visitor>
EIGEN_DEVICE_FUNC
void DenseBase<Derived>::visit(Visitor& visitor) const
{
+ if(size()==0)
+ return;
+
typedef typename internal::visitor_evaluator<Derived> ThisEvaluator;
ThisEvaluator thisEval(derived());
-
+
enum {
unroll = SizeAtCompileTime != Dynamic
- && SizeAtCompileTime * ThisEvaluator::CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost <= EIGEN_UNROLLING_LIMIT
+ && SizeAtCompileTime * int(ThisEvaluator::CoeffReadCost) + (SizeAtCompileTime-1) * int(internal::functor_traits<Visitor>::Cost) <= EIGEN_UNROLLING_LIMIT
};
return internal::visitor_impl<Visitor, ThisEvaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(thisEval, visitor);
}
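
To make the unrolling condition concrete: for a Matrix4f visited with a min_coeff_visitor, SizeAtCompileTime is 16, CoeffReadCost is 1 and the visitor cost is NumTraits<float>::AddCost == 1, giving 16*1 + 15*1 = 31, below the default EIGEN_UNROLLING_LIMIT of 100, so the whole visit is unrolled at compile time; a dynamic-size matrix always takes the loop-based path.
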
@@ -124,6 +137,9 @@ namespace internal {
template <typename Derived>
struct coeff_visitor
{
+  // default initialization to avoid countless spurious maybe-uninitialized warnings from gcc
+ EIGEN_DEVICE_FUNC
+ coeff_visitor() : row(-1), col(-1), res(0) {}
typedef typename Derived::Scalar Scalar;
Index row, col;
Scalar res;
@@ -141,7 +157,7 @@ struct coeff_visitor
*
* \sa DenseBase::minCoeff(Index*, Index*)
*/
-template <typename Derived>
+template <typename Derived, int NaNPropagation>
struct min_coeff_visitor : coeff_visitor<Derived>
{
typedef typename Derived::Scalar Scalar;
@@ -157,8 +173,40 @@ struct min_coeff_visitor : coeff_visitor<Derived>
}
};
-template<typename Scalar>
-struct functor_traits<min_coeff_visitor<Scalar> > {
+template <typename Derived>
+struct min_coeff_visitor<Derived, PropagateNumbers> : coeff_visitor<Derived>
+{
+ typedef typename Derived::Scalar Scalar;
+ EIGEN_DEVICE_FUNC
+ void operator() (const Scalar& value, Index i, Index j)
+ {
+ if((numext::isnan)(this->res) || (!(numext::isnan)(value) && value < this->res))
+ {
+ this->res = value;
+ this->row = i;
+ this->col = j;
+ }
+ }
+};
+
+template <typename Derived>
+struct min_coeff_visitor<Derived, PropagateNaN> : coeff_visitor<Derived>
+{
+ typedef typename Derived::Scalar Scalar;
+ EIGEN_DEVICE_FUNC
+ void operator() (const Scalar& value, Index i, Index j)
+ {
+ if((numext::isnan)(value) || value < this->res)
+ {
+ this->res = value;
+ this->row = i;
+ this->col = j;
+ }
+ }
+};
+
+template<typename Scalar, int NaNPropagation>
+struct functor_traits<min_coeff_visitor<Scalar, NaNPropagation> > {
enum {
Cost = NumTraits<Scalar>::AddCost
};
@@ -169,10 +217,10 @@ struct functor_traits<min_coeff_visitor<Scalar> > {
*
* \sa DenseBase::maxCoeff(Index*, Index*)
*/
-template <typename Derived>
+template <typename Derived, int NaNPropagation>
struct max_coeff_visitor : coeff_visitor<Derived>
{
- typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Scalar Scalar;
EIGEN_DEVICE_FUNC
void operator() (const Scalar& value, Index i, Index j)
{
@@ -185,8 +233,40 @@ struct max_coeff_visitor : coeff_visitor<Derived>
}
};
-template<typename Scalar>
-struct functor_traits<max_coeff_visitor<Scalar> > {
+template <typename Derived>
+struct max_coeff_visitor<Derived, PropagateNumbers> : coeff_visitor<Derived>
+{
+ typedef typename Derived::Scalar Scalar;
+ EIGEN_DEVICE_FUNC
+ void operator() (const Scalar& value, Index i, Index j)
+ {
+ if((numext::isnan)(this->res) || (!(numext::isnan)(value) && value > this->res))
+ {
+ this->res = value;
+ this->row = i;
+ this->col = j;
+ }
+ }
+};
+
+template <typename Derived>
+struct max_coeff_visitor<Derived, PropagateNaN> : coeff_visitor<Derived>
+{
+ typedef typename Derived::Scalar Scalar;
+ EIGEN_DEVICE_FUNC
+ void operator() (const Scalar& value, Index i, Index j)
+ {
+ if((numext::isnan)(value) || value > this->res)
+ {
+ this->res = value;
+ this->row = i;
+ this->col = j;
+ }
+ }
+};
+
+template<typename Scalar, int NaNPropagation>
+struct functor_traits<max_coeff_visitor<Scalar, NaNPropagation> > {
enum {
Cost = NumTraits<Scalar>::AddCost
};
@@ -196,17 +276,24 @@ struct functor_traits<max_coeff_visitor<Scalar> > {
/** \fn DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
 * \returns the minimum of all coefficients of *this and puts in *rowId and *colId its location.
- * \warning the result is undefined if \c *this contains NaN.
+ *
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
+ * NaNPropagation == PropagateFast : undefined
+ * NaNPropagation == PropagateNaN : result is NaN
+ * NaNPropagation == PropagateNumbers : result is the minimum of the elements that are not NaN
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
*
* \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()
*/
template<typename Derived>
-template<typename IndexType>
+template<int NaNPropagation, typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
{
- internal::min_coeff_visitor<Derived> minVisitor;
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
+ internal::min_coeff_visitor<Derived, NaNPropagation> minVisitor;
this->visit(minVisitor);
*rowId = minVisitor.row;
if (colId) *colId = minVisitor.col;
@@ -214,18 +301,25 @@ DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
}
/** \returns the minimum of all coefficients of *this and puts in *index its location.
- * \warning the result is undefined if \c *this contains NaN.
+ *
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
+ * NaNPropagation == PropagateFast : undefined
+ * NaNPropagation == PropagateNaN : result is NaN
+ * NaNPropagation == PropagateNumbers : result is the minimum of the elements that are not NaN
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff()
*/
template<typename Derived>
-template<typename IndexType>
+template<int NaNPropagation, typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(IndexType* index) const
{
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- internal::min_coeff_visitor<Derived> minVisitor;
+ internal::min_coeff_visitor<Derived, NaNPropagation> minVisitor;
this->visit(minVisitor);
*index = IndexType((RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row);
return minVisitor.res;
@@ -233,17 +327,24 @@ DenseBase<Derived>::minCoeff(IndexType* index) const
/** \fn DenseBase<Derived>::maxCoeff(IndexType* rowId, IndexType* colId) const
 * \returns the maximum of all coefficients of *this and puts in *rowId and *colId its location.
- * \warning the result is undefined if \c *this contains NaN.
+ *
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
+ * NaNPropagation == PropagateFast : undefined
+ * NaNPropagation == PropagateNaN : result is NaN
+ * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
*/
template<typename Derived>
-template<typename IndexType>
+template<int NaNPropagation, typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
{
- internal::max_coeff_visitor<Derived> maxVisitor;
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
+ internal::max_coeff_visitor<Derived, NaNPropagation> maxVisitor;
this->visit(maxVisitor);
*rowPtr = maxVisitor.row;
if (colPtr) *colPtr = maxVisitor.col;
@@ -251,18 +352,25 @@ DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
}
/** \returns the maximum of all coefficients of *this and puts in *index its location.
- * \warning the result is undefined if \c *this contains NaN.
+ *
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
+ * NaNPropagation == PropagateFast : undefined
+ * NaNPropagation == PropagateNaN : result is NaN
+ * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN
+ * \warning the matrix must not be empty, otherwise an assertion is triggered.
*
 * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
*/
template<typename Derived>
-template<typename IndexType>
+template<int NaNPropagation, typename IndexType>
EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(IndexType* index) const
{
+ eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- internal::max_coeff_visitor<Derived> maxVisitor;
+ internal::max_coeff_visitor<Derived, NaNPropagation> maxVisitor;
this->visit(maxVisitor);
*index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
return maxVisitor.res;
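
A usage sketch of the new NaNPropagation template parameter (assuming this Eigen revision, where PropagateFast remains the default, cheapest behavior):

    #include <Eigen/Dense>
    #include <limits>

    int main() {
      const float nan = std::numeric_limits<float>::quiet_NaN();
      Eigen::Vector3f v(1.f, nan, -2.f);
      Eigen::Index i;
      float a = v.minCoeff<Eigen::PropagateNumbers>(&i); // a == -2.f, i == 2 (NaN skipped)
      float b = v.minCoeff<Eigen::PropagateNaN>(&i);     // b is NaN,   i == 1
      (void)a; (void)b;
      return 0;
    }
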
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/Complex.h
index 7fa61969d..e9096c0a1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/Complex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/Complex.h
@@ -22,6 +22,7 @@ struct Packet4cf
__m256 v;
};
+#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<std::complex<float> > : default_packet_traits
{
typedef Packet4cf type;
@@ -37,6 +38,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
HasMul = 1,
HasDiv = 1,
HasNegate = 1,
+ HasSqrt = 1,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
@@ -44,8 +46,20 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
HasSetLinear = 0
};
};
+#endif
-template<> struct unpacket_traits<Packet4cf> { typedef std::complex<float> type; enum {size=4, alignment=Aligned32}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet4cf> {
+ typedef std::complex<float> type;
+ typedef Packet2cf half;
+ typedef Packet8f as_real;
+ enum {
+ size=4,
+ alignment=Aligned32,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
template<> EIGEN_STRONG_INLINE Packet4cf padd<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf psub<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); }
@@ -67,10 +81,17 @@ template<> EIGEN_STRONG_INLINE Packet4cf pmul<Packet4cf>(const Packet4cf& a, con
return Packet4cf(result);
}
+template <>
+EIGEN_STRONG_INLINE Packet4cf pcmp_eq(const Packet4cf& a, const Packet4cf& b) {
+ __m256 eq = _mm256_cmp_ps(a.v, b.v, _CMP_EQ_OQ);
+ return Packet4cf(_mm256_and_ps(eq, _mm256_permute_ps(eq, 0xb1)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cf ptrue<Packet4cf>(const Packet4cf& a) { return Packet4cf(ptrue(Packet8f(a.v))); }
template<> EIGEN_STRONG_INLINE Packet4cf pand <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_and_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf por <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_or_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pxor <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_xor_ps(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet4cf pandnot<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cf pandnot<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(b.v,a.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pload <Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet4cf ploadu<Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }
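
The pandnot change above is a genuine bug fix rather than a cleanup: Eigen defines pandnot(a, b) as a & ~b, while the andnot intrinsics compute ~x & y, with the complement applied to the first operand, so the arguments have to be swapped. A scalar model of the identity being used:

    #include <cstdint>

    // andnot(x, y) in SSE/AVX means (~x) & y ...
    static inline uint32_t intrinsic_andnot(uint32_t x, uint32_t y) { return ~x & y; }
    // ... so pandnot(a, b) = a & ~b must pass b first.
    static inline uint32_t pandnot_model(uint32_t a, uint32_t b) { return intrinsic_andnot(b, a); }
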
@@ -78,7 +99,9 @@ template<> EIGEN_STRONG_INLINE Packet4cf ploadu<Packet4cf>(const std::complex<fl
template<> EIGEN_STRONG_INLINE Packet4cf pset1<Packet4cf>(const std::complex<float>& from)
{
- return Packet4cf(_mm256_castpd_ps(_mm256_broadcast_sd((const double*)(const void*)&from)));
+ const float re = std::real(from);
+ const float im = std::imag(from);
+ return Packet4cf(_mm256_set_ps(im, re, im, re, im, re, im, re));
}
template<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<float>* from)
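
The new pset1 also deserves a note: the old code type-punned the std::complex<float> pair as a double in order to broadcast it, which is fragile under strict aliasing; the replacement reads the real and imaginary parts and rebuilds the packet explicitly. Recall that _mm256_set_ps lists lanes from highest to lowest, hence the (im, re, im, re, ...) argument order:

    // _mm256_set_ps(im, re, im, re, im, re, im, re) stores
    // [re, im, re, im, re, im, re, im] in memory -- four copies of (re, im).
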
@@ -140,70 +163,12 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet4cf>(const Packe
Packet2cf(_mm256_extractf128_ps(a.v,1))));
}
-template<> EIGEN_STRONG_INLINE Packet4cf preduxp<Packet4cf>(const Packet4cf* vecs)
-{
- Packet8f t0 = _mm256_shuffle_ps(vecs[0].v, vecs[0].v, _MM_SHUFFLE(3, 1, 2 ,0));
- Packet8f t1 = _mm256_shuffle_ps(vecs[1].v, vecs[1].v, _MM_SHUFFLE(3, 1, 2 ,0));
- t0 = _mm256_hadd_ps(t0,t1);
- Packet8f t2 = _mm256_shuffle_ps(vecs[2].v, vecs[2].v, _MM_SHUFFLE(3, 1, 2 ,0));
- Packet8f t3 = _mm256_shuffle_ps(vecs[3].v, vecs[3].v, _MM_SHUFFLE(3, 1, 2 ,0));
- t2 = _mm256_hadd_ps(t2,t3);
-
- t1 = _mm256_permute2f128_ps(t0,t2, 0 + (2<<4));
- t3 = _mm256_permute2f128_ps(t0,t2, 1 + (3<<4));
-
- return Packet4cf(_mm256_add_ps(t1,t3));
-}
-
template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet4cf>(const Packet4cf& a)
{
return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)),
Packet2cf(_mm256_extractf128_ps(a.v, 1))));
}
-template<int Offset>
-struct palign_impl<Offset,Packet4cf>
-{
- static EIGEN_STRONG_INLINE void run(Packet4cf& first, const Packet4cf& second)
- {
- if (Offset==0) return;
- palign_impl<Offset*2,Packet8f>::run(first.v, second.v);
- }
-};
-
-template<> struct conj_helper<Packet4cf, Packet4cf, false,true>
-{
- EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
-
-template<> struct conj_helper<Packet4cf, Packet4cf, true,false>
-{
- EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
-
-template<> struct conj_helper<Packet4cf, Packet4cf, true,true>
-{
- EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
-
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cf,Packet8f)
template<> EIGEN_STRONG_INLINE Packet4cf pdiv<Packet4cf>(const Packet4cf& a, const Packet4cf& b)
@@ -228,6 +193,7 @@ struct Packet2cd
__m256d v;
};
+#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<std::complex<double> > : default_packet_traits
{
typedef Packet2cd type;
@@ -243,6 +209,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
HasMul = 1,
HasDiv = 1,
HasNegate = 1,
+ HasSqrt = 1,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
@@ -250,8 +217,20 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
HasSetLinear = 0
};
};
+#endif
-template<> struct unpacket_traits<Packet2cd> { typedef std::complex<double> type; enum {size=2, alignment=Aligned32}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet2cd> {
+ typedef std::complex<double> type;
+ typedef Packet1cd half;
+ typedef Packet4d as_real;
+ enum {
+ size=2,
+ alignment=Aligned32,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
template<> EIGEN_STRONG_INLINE Packet2cd padd<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd psub<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); }
@@ -272,10 +251,17 @@ template<> EIGEN_STRONG_INLINE Packet2cd pmul<Packet2cd>(const Packet2cd& a, con
return Packet2cd(_mm256_addsub_pd(even, odd));
}
+template <>
+EIGEN_STRONG_INLINE Packet2cd pcmp_eq(const Packet2cd& a, const Packet2cd& b) {
+ __m256d eq = _mm256_cmp_pd(a.v, b.v, _CMP_EQ_OQ);
+ return Packet2cd(pand(eq, _mm256_permute_pd(eq, 0x5)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cd ptrue<Packet2cd>(const Packet2cd& a) { return Packet2cd(ptrue(Packet4d(a.v))); }
template<> EIGEN_STRONG_INLINE Packet2cd pand <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_and_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd por <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_or_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pxor <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_xor_pd(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cd pandnot<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cd pandnot<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(b.v,a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pload <Packet2cd>(const std::complex<double>* from)
{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload<Packet4d>((const double*)from)); }
@@ -327,63 +313,12 @@ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet2cd>(const Pack
Packet1cd(_mm256_extractf128_pd(a.v,1))));
}
-template<> EIGEN_STRONG_INLINE Packet2cd preduxp<Packet2cd>(const Packet2cd* vecs)
-{
- Packet4d t0 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 0 + (2<<4));
- Packet4d t1 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 1 + (3<<4));
-
- return Packet2cd(_mm256_add_pd(t0,t1));
-}
-
template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet2cd>(const Packet2cd& a)
{
return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)),
Packet1cd(_mm256_extractf128_pd(a.v,1))));
}
-template<int Offset>
-struct palign_impl<Offset,Packet2cd>
-{
- static EIGEN_STRONG_INLINE void run(Packet2cd& first, const Packet2cd& second)
- {
- if (Offset==0) return;
- palign_impl<Offset*2,Packet4d>::run(first.v, second.v);
- }
-};
-
-template<> struct conj_helper<Packet2cd, Packet2cd, false,true>
-{
- EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
-
-template<> struct conj_helper<Packet2cd, Packet2cd, true,false>
-{
- EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
-
-template<> struct conj_helper<Packet2cd, Packet2cd, true,true>
-{
- EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
-
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cd,Packet4d)
template<> EIGEN_STRONG_INLINE Packet2cd pdiv<Packet2cd>(const Packet2cd& a, const Packet2cd& b)
@@ -424,24 +359,12 @@ ptranspose(PacketBlock<Packet2cd,2>& kernel) {
kernel.packet[0].v = tmp;
}
-template<> EIGEN_STRONG_INLINE Packet4cf pinsertfirst(const Packet4cf& a, std::complex<float> b)
-{
- return Packet4cf(_mm256_blend_ps(a.v,pset1<Packet4cf>(b).v,1|2));
-}
-
-template<> EIGEN_STRONG_INLINE Packet2cd pinsertfirst(const Packet2cd& a, std::complex<double> b)
-{
- return Packet2cd(_mm256_blend_pd(a.v,pset1<Packet2cd>(b).v,1|2));
+template<> EIGEN_STRONG_INLINE Packet2cd psqrt<Packet2cd>(const Packet2cd& a) {
+ return psqrt_complex<Packet2cd>(a);
}
-template<> EIGEN_STRONG_INLINE Packet4cf pinsertlast(const Packet4cf& a, std::complex<float> b)
-{
- return Packet4cf(_mm256_blend_ps(a.v,pset1<Packet4cf>(b).v,(1<<7)|(1<<6)));
-}
-
-template<> EIGEN_STRONG_INLINE Packet2cd pinsertlast(const Packet2cd& a, std::complex<double> b)
-{
- return Packet2cd(_mm256_blend_pd(a.v,pset1<Packet2cd>(b).v,(1<<3)|(1<<2)));
+template<> EIGEN_STRONG_INLINE Packet4cf psqrt<Packet4cf>(const Packet4cf& a) {
+ return psqrt_complex<Packet4cf>(a);
}
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/MathFunctions.h
index 6af67ce2d..67041c812 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/MathFunctions.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_MATH_FUNCTIONS_AVX_H
#define EIGEN_MATH_FUNCTIONS_AVX_H
-/* The sin, cos, exp, and log functions of this file are loosely derived from
+/* The sin and cos functions of this file are loosely derived from
* Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
*/
@@ -18,187 +18,50 @@ namespace Eigen {
namespace internal {
-inline Packet8i pshiftleft(Packet8i v, int n)
-{
-#ifdef EIGEN_VECTORIZE_AVX2
- return _mm256_slli_epi32(v, n);
-#else
- __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);
- __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);
- return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
-#endif
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
+psin<Packet8f>(const Packet8f& _x) {
+ return psin_float(_x);
}
-inline Packet8f pshiftright(Packet8f v, int n)
-{
-#ifdef EIGEN_VECTORIZE_AVX2
- return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n));
-#else
- __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n);
- __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n);
- return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1));
-#endif
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
+pcos<Packet8f>(const Packet8f& _x) {
+ return pcos_float(_x);
}
-// Sine function
-// Computes sin(x) by wrapping x to the interval [-Pi/4,3*Pi/4] and
-// evaluating interpolants in [-Pi/4,Pi/4] or [Pi/4,3*Pi/4]. The interpolants
-// are (anti-)symmetric and thus have only odd/even coefficients
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
-psin<Packet8f>(const Packet8f& _x) {
- Packet8f x = _x;
+plog<Packet8f>(const Packet8f& _x) {
+ return plog_float(_x);
+}
- // Some useful values.
- _EIGEN_DECLARE_CONST_Packet8i(one, 1);
- _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
- _EIGEN_DECLARE_CONST_Packet8f(two, 2.0f);
- _EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f);
- _EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f);
- _EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f);
- _EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f);
- _EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f);
-
- // Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period.
- Packet8f z = pmul(x, p8f_one_over_pi);
- Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));
- x = pmadd(shift, p8f_neg_pi_first, x);
- x = pmadd(shift, p8f_neg_pi_second, x);
- x = pmadd(shift, p8f_neg_pi_third, x);
- z = pmul(x, p8f_four_over_pi);
-
- // Make a mask for the entries that need flipping, i.e. wherever the shift
- // is odd.
- Packet8i shift_ints = _mm256_cvtps_epi32(shift);
- Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one)));
- Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31);
-
- // Create a mask for which interpolant to use, i.e. if z > 1, then the mask
- // is set to ones for that entry.
- Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ);
-
- // Evaluate the polynomial for the interval [1,3] in z.
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f);
- Packet8f z_minus_two = psub(z, p8f_two);
- Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two);
- Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4);
- right = pmadd(right, z_minus_two2, p8f_coeff_right_2);
- right = pmadd(right, z_minus_two2, p8f_coeff_right_0);
-
- // Evaluate the polynomial for the interval [-1,1] in z.
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f);
- _EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f);
- Packet8f z2 = pmul(z, z);
- Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5);
- left = pmadd(left, z2, p8f_coeff_left_3);
- left = pmadd(left, z2, p8f_coeff_left_1);
- left = pmul(left, z);
-
- // Assemble the results, i.e. select the left and right polynomials.
- left = _mm256_andnot_ps(ival_mask, left);
- right = _mm256_and_ps(ival_mask, right);
- Packet8f res = _mm256_or_ps(left, right);
-
- // Flip the sign on the odd intervals and return the result.
- res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask));
- return res;
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
+plog<Packet4d>(const Packet4d& _x) {
+ return plog_double(_x);
}
-// Natural logarithm
-// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)
-// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
-// be easily approximated by a polynomial centered on m=1 for stability.
-// TODO(gonnet): Further reduce the interval allowing for lower-degree
-// polynomial interpolants -> ... -> profit!
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
-plog<Packet8f>(const Packet8f& _x) {
- Packet8f x = _x;
- _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
- _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f);
-
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
- // The smallest non denormalized float number.
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000);
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000);
-
- // Polynomial coefficients.
- _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f);
-
- Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN
- Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ);
-
- // Truncate input values to the minimum positive normal.
- x = pmax(x, p8f_min_norm_pos);
-
- Packet8f emm0 = pshiftright(x,23);
- Packet8f e = _mm256_sub_ps(emm0, p8f_126f);
-
- // Set the exponents to -1, i.e. x are in the range [0.5,1).
- x = _mm256_and_ps(x, p8f_inv_mant_mask);
- x = _mm256_or_ps(x, p8f_half);
-
- // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
- // and shift by -1. The values are then centered around 0, which improves
- // the stability of the polynomial evaluation.
- // if( x < SQRTHF ) {
- // e -= 1;
- // x = x + x - 1.0;
- // } else { x = x - 1.0; }
- Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ);
- Packet8f tmp = _mm256_and_ps(x, mask);
- x = psub(x, p8f_1);
- e = psub(e, _mm256_and_ps(p8f_1, mask));
- x = padd(x, tmp);
-
- Packet8f x2 = pmul(x, x);
- Packet8f x3 = pmul(x2, x);
-
- // Evaluate the polynomial approximant of degree 8 in three parts, probably
- // to improve instruction-level parallelism.
- Packet8f y, y1, y2;
- y = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1);
- y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4);
- y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7);
- y = pmadd(y, x, p8f_cephes_log_p2);
- y1 = pmadd(y1, x, p8f_cephes_log_p5);
- y2 = pmadd(y2, x, p8f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- // Add the logarithm of the exponent back to the result of the interpolation.
- y1 = pmul(e, p8f_cephes_log_q1);
- tmp = pmul(x2, p8f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p8f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
-
- // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
- return _mm256_or_ps(
- _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)),
- _mm256_and_ps(iszero_mask, p8f_minus_inf));
+plog2<Packet8f>(const Packet8f& _x) {
+ return plog2_float(_x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
+plog2<Packet4d>(const Packet4d& _x) {
+ return plog2_double(_x);
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f plog1p<Packet8f>(const Packet8f& _x) {
+ return generic_plog1p(_x);
+}
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f pexpm1<Packet8f>(const Packet8f& _x) {
+ return generic_expm1(_x);
}
// Exponential function. Works by writing "x = m*log(2) + r" where
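
The large hand-written psin and plog bodies deleted above were not lost: they moved into shared, packet-generic kernels (psin_float, pcos_float, plog_float, plog_double, plog2_float, generic_plog1p, generic_expm1, ...), so each SIMD backend shrinks to the one-line forwarders that replace them. A sketch of the pattern for a hypothetical new packet type PacketNf:

    // Once the packet primitives (padd, pmul, pmadd, ...) exist for PacketNf,
    // each math function is a single forward into the shared kernel.
    template <>
    EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED PacketNf
    pexp<PacketNf>(const PacketNf& x) { return pexp_float(x); }
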
@@ -207,149 +70,21 @@ plog<Packet8f>(const Packet8f& _x) {
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
pexp<Packet8f>(const Packet8f& _x) {
- _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
- _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet8f(127, 127.0f);
-
- _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f);
- _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f);
-
- _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f);
-
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f);
-
- // Clamp x.
- Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo);
-
- // Express exp(x) as exp(m*ln(2) + r), start by extracting
- // m = floor(x/ln(2) + 0.5).
- Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half));
-
-// Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
-// subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
-// truncation errors. Note that we don't use the "pmadd" function here to
-// ensure that a precision-preserving FMA instruction is used.
-#ifdef EIGEN_VECTORIZE_FMA
- _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f);
- Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x);
-#else
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f);
- _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f);
- Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1));
- r = psub(r, pmul(m, p8f_cephes_exp_C2));
-#endif
-
- Packet8f r2 = pmul(r, r);
-
- // TODO(gonnet): Split into odd/even polynomials and try to exploit
- // instruction-level parallelism.
- Packet8f y = p8f_cephes_exp_p0;
- y = pmadd(y, r, p8f_cephes_exp_p1);
- y = pmadd(y, r, p8f_cephes_exp_p2);
- y = pmadd(y, r, p8f_cephes_exp_p3);
- y = pmadd(y, r, p8f_cephes_exp_p4);
- y = pmadd(y, r, p8f_cephes_exp_p5);
- y = pmadd(y, r2, r);
- y = padd(y, p8f_1);
-
- // Build emm0 = 2^m.
- Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));
- emm0 = pshiftleft(emm0, 23);
-
- // Return 2^m * exp(r).
- return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x);
+ return pexp_float(_x);
}
// Hyperbolic Tangent function.
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
-ptanh<Packet8f>(const Packet8f& x) {
- return internal::generic_fast_tanh_float(x);
+ptanh<Packet8f>(const Packet8f& _x) {
+ return internal::generic_fast_tanh_float(_x);
}
+// Exponential function for doubles.
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
pexp<Packet4d>(const Packet4d& _x) {
- Packet4d x = _x;
-
- _EIGEN_DECLARE_CONST_Packet4d(1, 1.0);
- _EIGEN_DECLARE_CONST_Packet4d(2, 2.0);
- _EIGEN_DECLARE_CONST_Packet4d(half, 0.5);
-
- _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437);
- _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0);
-
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125);
- _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6);
- _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);
-
- Packet4d tmp, fx;
-
- // clamp x
- x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo);
- // Express exp(x) as exp(g + n*log(2)).
- fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half);
-
- // Get the integer modulus of log(2), i.e. the "n" described above.
- fx = _mm256_floor_pd(fx);
-
- // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
- // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
- // digits right.
- tmp = pmul(fx, p4d_cephes_exp_C1);
- Packet4d z = pmul(fx, p4d_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- Packet4d x2 = pmul(x, x);
-
- // Evaluate the numerator polynomial of the rational interpolant.
- Packet4d px = p4d_cephes_exp_p0;
- px = pmadd(px, x2, p4d_cephes_exp_p1);
- px = pmadd(px, x2, p4d_cephes_exp_p2);
- px = pmul(px, x);
-
- // Evaluate the denominator polynomial of the rational interpolant.
- Packet4d qx = p4d_cephes_exp_q0;
- qx = pmadd(qx, x2, p4d_cephes_exp_q1);
- qx = pmadd(qx, x2, p4d_cephes_exp_q2);
- qx = pmadd(qx, x2, p4d_cephes_exp_q3);
-
- // I don't really get this bit, copied from the SSE2 routines, so...
- // TODO(gonnet): Figure out what is going on here, perhaps find a better
- // rational interpolant?
- x = _mm256_div_pd(px, psub(qx, px));
- x = pmadd(p4d_2, x, p4d_1);
-
- // Build e=2^n by constructing the exponents in a 128-bit vector and
- // shifting them to where they belong in double-precision values.
- __m128i emm0 = _mm256_cvtpd_epi32(fx);
- emm0 = _mm_add_epi32(emm0, p4i_1023);
- emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
- __m128i lo = _mm_slli_epi64(emm0, 52);
- __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
- __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
- e = _mm256_insertf128_si256(e, hi, 1);
-
- // Construct the result 2^n * exp(g) = e * x. The max is used to catch
- // non-finite values in the input.
- return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);
+ return pexp_double(_x);
}
// Functions for sqrt.
@@ -362,37 +97,39 @@ pexp<Packet4d>(const Packet4d& _x) {
// For details see here: http://www.beyond3d.com/content/articles/8/
#if EIGEN_FAST_MATH
template <>
-EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
-psqrt<Packet8f>(const Packet8f& _x) {
- Packet8f half = pmul(_x, pset1<Packet8f>(.5f));
- Packet8f denormal_mask = _mm256_and_ps(
- _mm256_cmp_ps(_x, pset1<Packet8f>((std::numeric_limits<float>::min)()),
- _CMP_LT_OQ),
- _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_GE_OQ));
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f psqrt<Packet8f>(const Packet8f& _x) {
+ Packet8f minus_half_x = pmul(_x, pset1<Packet8f>(-0.5f));
+ Packet8f denormal_mask = pandnot(
+ pcmp_lt(_x, pset1<Packet8f>((std::numeric_limits<float>::min)())),
+ pcmp_lt(_x, pzero(_x)));
// Compute approximate reciprocal sqrt.
Packet8f x = _mm256_rsqrt_ps(_x);
// Do a single step of Newton's iteration.
- x = pmul(x, psub(pset1<Packet8f>(1.5f), pmul(half, pmul(x,x))));
+ x = pmul(x, pmadd(minus_half_x, pmul(x,x), pset1<Packet8f>(1.5f)));
// Flush results for denormals to zero.
- return _mm256_andnot_ps(denormal_mask, pmul(_x,x));
+ return pandnot(pmul(_x,x), denormal_mask);
}
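// Scalar sketch of the refinement above (assumed standalone helper): one
// Newton-Raphson step y <- y*(1.5 - 0.5*x*y^2) roughly doubles the ~12 bits
// of accuracy of the hardware rsqrt estimate, and sqrt(x) is then recovered
// as x * (1/sqrt(x)); positive denormal inputs are flushed to zero, matching
// the pandnot(denormal_mask) above.
#include <cmath>
#include <limits>
float fast_sqrt_sketch(float x) {
  float y = 1.0f / std::sqrt(x);           // stand-in for _mm256_rsqrt_ps
  y = y * (-0.5f * x * (y * y) + 1.5f);    // one Newton-Raphson step
  bool denormal = (x >= 0.0f) && (x < std::numeric_limits<float>::min());
  return denormal ? 0.0f : x * y;          // sqrt(x) = x * rsqrt(x)
}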
+
#else
+
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet8f psqrt<Packet8f>(const Packet8f& x) {
- return _mm256_sqrt_ps(x);
+Packet8f psqrt<Packet8f>(const Packet8f& _x) {
+ return _mm256_sqrt_ps(_x);
}
+
#endif
+
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4d psqrt<Packet4d>(const Packet4d& x) {
- return _mm256_sqrt_pd(x);
+Packet4d psqrt<Packet4d>(const Packet4d& _x) {
+ return _mm256_sqrt_pd(_x);
}
-#if EIGEN_FAST_MATH
+#if EIGEN_FAST_MATH
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
_EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inf, 0x7f800000);
- _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(nan, 0x7fc00000);
_EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
_EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
_EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);
@@ -401,36 +138,88 @@ Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
// select only the inverse sqrt of positive normal inputs (denormals are
// flushed to zero and cause infs as well).
- Packet8f le_zero_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);
- Packet8f x = _mm256_andnot_ps(le_zero_mask, _mm256_rsqrt_ps(_x));
-
- // Fill in NaNs and Infs for the negative/zero entries.
- Packet8f neg_mask = _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_LT_OQ);
- Packet8f zero_mask = _mm256_andnot_ps(neg_mask, le_zero_mask);
- Packet8f infs_and_nans = _mm256_or_ps(_mm256_and_ps(neg_mask, p8f_nan),
- _mm256_and_ps(zero_mask, p8f_inf));
-
- // Do a single step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8f_one_point_five));
-
- // Insert NaNs and Infs in all the right places.
- return _mm256_or_ps(x, infs_and_nans);
+ Packet8f lt_min_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);
+ Packet8f inf_mask = _mm256_cmp_ps(_x, p8f_inf, _CMP_EQ_OQ);
+ Packet8f not_normal_finite_mask = _mm256_or_ps(lt_min_mask, inf_mask);
+
+ // Compute an approximate result using the rsqrt intrinsic.
+ Packet8f y_approx = _mm256_rsqrt_ps(_x);
+
+ // Do a single step of Newton-Raphson iteration to improve the approximation.
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
+ // It is essential to evaluate the inner term like this because forming
+ // y_n^2 may over- or underflow.
+ Packet8f y_newton = pmul(y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p8f_one_point_five));
+
+ // Select the result of the Newton-Raphson step for positive normal arguments.
+ // For other arguments, choose the output of the intrinsic. This will
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
+ // x is zero or a positive denormalized float (equivalent to flushing positive
+ // denormalized inputs to zero).
+ return pselect<Packet8f>(not_normal_finite_mask, y_approx, y_newton);
}
#else
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet8f prsqrt<Packet8f>(const Packet8f& x) {
+Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
_EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
- return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(x));
+ return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(_x));
}
#endif
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4d prsqrt<Packet4d>(const Packet4d& x) {
+Packet4d prsqrt<Packet4d>(const Packet4d& _x) {
_EIGEN_DECLARE_CONST_Packet4d(one, 1.0);
- return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(x));
+ return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(_x));
}
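// Scalar sketch of the EIGEN_FAST_MATH prsqrt selection above (assumed
// helper): the Newton-Raphson step is only valid for positive normal finite
// inputs, so everything else keeps the raw estimate, which already yields
// rsqrt(+inf) = 0, NaN for x < 0, and +inf for zero/denormal x.
#include <cmath>
#include <limits>
float fast_rsqrt_sketch(float x) {
  float y_approx = 1.0f / std::sqrt(x);    // stand-in for _mm256_rsqrt_ps
  // Group as y*(0.5*x)*y rather than forming y^2, which could over/underflow.
  float y_newton = y_approx * (1.5f - y_approx * (0.5f * x) * y_approx);
  bool not_normal_finite =
      (x < std::numeric_limits<float>::min()) || std::isinf(x);
  return not_normal_finite ? y_approx : y_newton;
}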
+F16_PACKET_FUNCTION(Packet8f, Packet8h, psin)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, pcos)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, plog)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, plog2)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, plog1p)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, pexpm1)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, pexp)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, ptanh)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, psqrt)
+F16_PACKET_FUNCTION(Packet8f, Packet8h, prsqrt)
+
+template <>
+EIGEN_STRONG_INLINE Packet8h pfrexp(const Packet8h& a, Packet8h& exponent) {
+ Packet8f fexponent;
+ const Packet8h out = float2half(pfrexp<Packet8f>(half2float(a), fexponent));
+ exponent = float2half(fexponent);
+ return out;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8h pldexp(const Packet8h& a, const Packet8h& exponent) {
+ return float2half(pldexp<Packet8f>(half2float(a), half2float(exponent)));
+}
+
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, psin)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pcos)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog2)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog1p)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pexpm1)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pexp)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, ptanh)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, psqrt)
+BF16_PACKET_FUNCTION(Packet8f, Packet8bf, prsqrt)
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf pfrexp(const Packet8bf& a, Packet8bf& exponent) {
+ Packet8f fexponent;
+ const Packet8bf out = F32ToBf16(pfrexp<Packet8f>(Bf16ToF32(a), fexponent));
+ exponent = F32ToBf16(fexponent);
+ return out;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf pldexp(const Packet8bf& a, const Packet8bf& exponent) {
+ return F32ToBf16(pldexp<Packet8f>(Bf16ToF32(a), Bf16ToF32(exponent)));
+}
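// Each F16_PACKET_FUNCTION/BF16_PACKET_FUNCTION line above expands to the
// same widen-compute-narrow wrapper that the pfrexp/pldexp overloads spell
// out explicitly. A minimal standalone sketch of the Packet8h flavor
// (helper name hypothetical, not the actual macro expansion):
template <typename Packet8fOp>
EIGEN_STRONG_INLINE Packet8h apply_via_float(Packet8fOp op, const Packet8h& a) {
  return float2half(op(half2float(a)));  // half -> float, compute, narrow back
}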
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/PacketMath.h
index 636230944..7fc32fd71 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -18,11 +18,11 @@ namespace internal {
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
-#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
@@ -31,10 +31,14 @@ namespace internal {
typedef __m256 Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;
+typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
+typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
template<> struct is_arithmetic<__m256> { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };
+template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
+template<> struct is_arithmetic<Packet8bf> { enum { value = true }; };
#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
const Packet8f p8f_##NAME = pset1<Packet8f>(X)
@@ -58,21 +62,28 @@ template<> struct packet_traits<float> : default_packet_traits
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
- size=8,
+ size = 8,
HasHalfPacket = 1,
- HasDiv = 1,
- HasSin = EIGEN_FAST_MATH,
- HasCos = 0,
- HasLog = 1,
- HasExp = 1,
+ HasCmp = 1,
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasExp = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
HasSqrt = 1,
HasRsqrt = 1,
- HasTanh = EIGEN_FAST_MATH,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
HasBlend = 1,
HasRound = 1,
HasFloor = 1,
- HasCeil = 1
+ HasCeil = 1,
+ HasRint = 1
};
};
template<> struct packet_traits<double> : default_packet_traits
@@ -85,14 +96,104 @@ template<> struct packet_traits<double> : default_packet_traits
size=4,
HasHalfPacket = 1,
+ HasCmp = 1,
HasDiv = 1,
+ HasLog = 1,
HasExp = 1,
HasSqrt = 1,
HasRsqrt = 1,
HasBlend = 1,
HasRound = 1,
HasFloor = 1,
- HasCeil = 1
+ HasCeil = 1,
+ HasRint = 1
+ };
+};
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+ typedef Packet8h type;
+ // There is no half-size packet for Packet8h.
+ typedef Packet8h half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 0,
+
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasAbs2 = 0,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasLog = 1,
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasBlend = 0,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+ HasBessel = 1,
+ HasNdtri = 1
+ };
+};
+
+template <>
+struct packet_traits<bfloat16> : default_packet_traits {
+ typedef Packet8bf type;
+ // There is no half-size packet for the current Packet8bf.
+ // TODO: support it via the SSE path.
+ typedef Packet8bf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 0,
+
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasAbs2 = 0,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasLog = 1,
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasBlend = 0,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+ HasBessel = 1,
+ HasNdtri = 1
};
};
#endif
@@ -113,14 +214,45 @@ template<> struct packet_traits<int> : default_packet_traits
};
*/
-template<> struct unpacket_traits<Packet8f> { typedef float type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
-template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
-template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };
+template<> struct unpacket_traits<Packet8f> {
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet8i integer_packet;
+ typedef uint8_t mask_t;
+ enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=true, masked_store_available=true};
+};
+template<> struct unpacket_traits<Packet4d> {
+ typedef double type;
+ typedef Packet2d half;
+ enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32, vectorizable=false, masked_load_available=false, masked_store_available=false}; };
+template<> struct unpacket_traits<Packet8bf> { typedef bfloat16 type; typedef Packet8bf half; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; };
+
+// Helper function for the bit-packing step of the low-precision comparisons:
+// it packs the eight 32-bit comparison flags into eight 16-bit flags.
+EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
+ return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
+ _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
+}
+
template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) { return _mm256_set1_epi32(from); }
+template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
+template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
+template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
+template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }
+
+
+template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); }
+template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
+template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); }
+
template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
@@ -129,9 +261,27 @@ template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { retur
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_add_epi32(a,b);
+#else
+ __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+ __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_sub_epi32(a,b);
+#else
+ __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+ __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
@@ -148,7 +298,15 @@ template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
-
+template<> EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_mullo_epi32(a,b);
+#else
+ const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+ const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
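// Without AVX2 there are no 256-bit integer ALU instructions, so the
// padd/psub/pmul specializations above all follow the same pattern: split
// each operand into 128-bit halves, apply the SSE instruction per half, and
// reassemble. A generic sketch of the pattern (helper name hypothetical):
template <typename SseBinaryOp>
EIGEN_STRONG_INLINE Packet8i binary_by_halves(SseBinaryOp op,
                                              const Packet8i& a,
                                              const Packet8i& b) {
  __m128i lo = op(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = op(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
}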
template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
@@ -157,13 +315,14 @@ template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, co
return pset1<Packet8i>(0);
}
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
-#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
- // clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
- // and gcc stupidly generates a vfmadd132ps instruction,
- // so let's enforce it to generate a vfmadd231ps instruction since the most common use case is to accumulate
- // the result of the product.
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
+ // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
+ // and even register spilling with clang>=6.0 (bug 1637).
+ // Gcc stupidly generates a vfmadd132ps instruction.
+ // So let's enforce it to generate a vfmadd231ps instruction since the most common use
+ // case is to accumulate the result of the product.
Packet8f res = c;
__asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
return res;
@@ -172,7 +331,7 @@ template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f&
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
-#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
// see above
Packet4d res = c;
__asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
@@ -183,24 +342,112 @@ template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d&
}
#endif
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }
+
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LE_OQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LT_OQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a, b, _CMP_NGE_UQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }
+
+
+template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_cmpeq_epi32(a,b);
+#else
+ __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+ __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
+
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // There appears to be a bug in GCC, by which the optimizer may flip
+ // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ Packet8f res;
+ asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::min.
return _mm256_min_ps(b,a);
+#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // See pmin above
+ Packet4d res;
+ asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::min.
return _mm256_min_pd(b,a);
+#endif
}
+
template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // See pmin above
+ Packet8f res;
+ asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::max.
return _mm256_max_ps(b,a);
+#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+ // See pmin above
+ Packet4d res;
+ asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
// Arguments are swapped to match NaN propagation behavior of std::max.
return _mm256_max_pd(b,a);
+#endif
+}
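// Why the operands are swapped above: vminps(x, y) computes (x < y) ? x : y
// and returns its second operand when the comparison is unordered (a NaN is
// present), whereas std::min(a, b) returns its first argument in that case.
// Scalar sketch of the equivalence (assumed helper):
float pmin_semantics_sketch(float a, float b) {
  // _mm256_min_ps(b, a) per lane: (b < a) ? b : a. A NaN in either operand
  // makes the comparison false and yields 'a', exactly like std::min(a, b).
  return (b < a) ? b : a;
}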
+
+// Add specializations for min/max with prescribed NaN propagation.
+template<>
+EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
+ return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
+ return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
+ return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
+ return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
+ return pminmax_propagate_nan(a, b, pmin<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
+ return pminmax_propagate_nan(a, b, pmin<Packet4d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
+ return pminmax_propagate_nan(a, b, pmax<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
+ return pminmax_propagate_nan(a, b, pmax<Packet4d>);
}
-template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
-template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
+
+template<> EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
+template<> EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }
@@ -208,17 +455,124 @@ template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { ret
template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ // vpcmpeqd has lower latency than the more general vcmpps
+ return _mm256_cmpeq_epi32(a,a);
+#else
+ const __m256 b = _mm256_castsi256_ps(a);
+ return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ // vpcmpeqd has lower latency than the more general vcmpps
+ const __m256i b = _mm256_castps_si256(a);
+ return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
+#else
+ return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ // vpcmpeqq has lower latency than the more general vcmppd
+ const __m256i b = _mm256_castpd_si256(a);
+ return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
+#else
+ return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
+#endif
+}
+
template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_and_si256(a,b);
+#else
+ return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_or_si256(a,b);
+#else
+ return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_xor_si256(a,b);
+#else
+ return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
+template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
+template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_andnot_si256(b,a);
+#else
+ return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a)
+{
+ const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
+ const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
+ return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+}
+template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a)
+{
+ const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
+ const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
+ return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+}
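// Scalar sketch of the rounding trick above (assumed helper): 0x3EFFFFFF is
// the largest float strictly below 0.5, and adding copysign(that, a) before
// truncating toward zero rounds halfway cases away from zero. Adding 0.5f
// itself would be wrong: for a = 0.5f - 2^-25 (the largest float below 0.5),
// a + 0.5f rounds up to 1.0f, so truncation would give 1 instead of 0.
#include <cmath>
float pround_sketch(float a) {
  const float prev0dot5 = 0.49999997f;                 // bit pattern 0x3EFFFFFF
  return std::trunc(a + std::copysign(prev0dot5, a));  // _MM_FROUND_TO_ZERO
}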
-template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
+{ return _mm256_blendv_ps(b,a,mask); }
+template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
+{ return _mm256_blendv_pd(b,a,mask); }
+
+template<int N> EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_srai_epi32(a, N);
+#else
+ __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
+ __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_srli_epi32(a, N);
+#else
+ __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
+ __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_slli_epi32(a, N);
+#else
+ __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
+ __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
@@ -228,6 +582,14 @@ template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EI
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
+template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
+ Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
+ const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
+ mask = por<Packet8i>(mask, bit_mask);
+ mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
+ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
+}
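// Scalar sketch of the mask expansion used by this masked load (and by the
// matching masked store below); names are hypothetical. Broadcasting umask
// into every byte and OR-ing with a constant that clears exactly bit i in
// lane i makes lane i all-ones iff bit i of umask is set.
#include <cstdint>
uint32_t lane_mask_sketch(uint8_t umask, int lane /* 0..7 */) {
  uint32_t broadcast = 0x01010101u * umask;   // umask replicated in every byte
  uint32_t bit_mask  = ~(1u << lane);         // all ones except bit 'lane'
  uint32_t merged    = broadcast | bit_mask;  // all-ones iff umask bit is set
  return (merged == 0xffffffffu) ? 0xffffffffu : 0u;  // pcmp_eq against ~0
}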
+
// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
@@ -265,6 +627,14 @@ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f&
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
+ Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
+ const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
+ mask = por<Packet8i>(mask, bit_mask);
+ mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
+ EIGEN_DEBUG_UNALIGNED_STORE return _mm256_maskstore_ps(to, mask, from);
+}
+
// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
@@ -318,9 +688,9 @@ template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
}
#ifndef EIGEN_VECTORIZE_AVX512
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif
template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
@@ -343,9 +713,12 @@ template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
__m256d tmp = _mm256_shuffle_pd(a,a,5);
return _mm256_permute2f128_pd(tmp, tmp, 1);
-
+ #if 0
+ // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
+ // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
__m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
return _mm256_permute_pd(swap_halves,5);
+ #endif
}
// pabs should be ok
@@ -360,47 +733,66 @@ template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
return _mm256_and_pd(a,mask);
}
-// preduxp should be ok
-// FIXME: why is this ok? why isn't the simply implementation working as expected?
-template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
-{
- __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
- __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
- __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
- __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);
-
- __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
- __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
- __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
- __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
-
- __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
- __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
- __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
- __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
+template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
+ return pfrexp_generic(a,exponent);
+}
- __m256 sum1 = _mm256_add_ps(perm1, hsum5);
- __m256 sum2 = _mm256_add_ps(perm2, hsum6);
- __m256 sum3 = _mm256_add_ps(perm3, hsum7);
- __m256 sum4 = _mm256_add_ps(perm4, hsum8);
+// Extract the biased exponent without relying on the existence of Packet4l.
+template<>
+EIGEN_STRONG_INLINE
+Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
+ const Packet4d cst_exp_mask = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
+ __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
+#ifdef EIGEN_VECTORIZE_AVX2
+ a_expo = _mm256_srli_epi64(a_expo, 52);
+ __m128i lo = _mm256_extractf128_si256(a_expo, 0);
+ __m128i hi = _mm256_extractf128_si256(a_expo, 1);
+#else
+ __m128i lo = _mm256_extractf128_si256(a_expo, 0);
+ __m128i hi = _mm256_extractf128_si256(a_expo, 1);
+ lo = _mm_srli_epi64(lo, 52);
+ hi = _mm_srli_epi64(hi, 52);
+#endif
+ Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
+ Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
+ Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
+ exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
+ return exponent;
+}
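// Scalar sketch of the extraction above (assumed helper): mask off the 11
// exponent bits of the IEEE-754 double and shift them down to bit 0.
#include <cstdint>
#include <cstring>
double biased_exponent_sketch(double a) {
  uint64_t bits;
  std::memcpy(&bits, &a, sizeof(bits));
  return static_cast<double>((bits & 0x7ff0000000000000ull) >> 52);
}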
- __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
- __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
- __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
- return final;
+template<> EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
+ return pfrexp_generic(a, exponent);
}
-template<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)
-{
- Packet4d tmp0, tmp1;
- tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
- tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
-
- tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
- tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
+template<> EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
+ return pldexp_generic(a, exponent);
+}
- return _mm256_blend_pd(tmp0, tmp1, 0xC);
+template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
+ // Clamp exponent to [-2099, 2099]
+ const Packet4d max_exponent = pset1<Packet4d>(2099.0);
+ const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
+
+ // Split 2^e into four factors and multiply.
+ const Packet4i bias = pset1<Packet4i>(1023);
+ Packet4i b = parithmetic_shift_right<2>(e); // floor(e/4)
+
+ // 2^b
+ Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
+ Packet4i lo = _mm_slli_epi64(hi, 52);
+ hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
+ Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
+ Packet4d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
+
+ // 2^(e - 3b)
+ b = psub(psub(psub(e, b), b), b); // e - 3b
+ hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
+ lo = _mm_slli_epi64(hi, 52);
+ hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
+ c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
+ out = pmul(out, c); // a * 2^e
+ return out;
}
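// Scalar sketch of the factor splitting above (assumed helper): a single 2^e
// built in the exponent field only exists for e in [-1022, 1023], so the
// clamped e is applied as four in-range factors 2^b * 2^b * 2^b * 2^(e-3b),
// which also keeps gradual under/overflow behavior intact.
#include <algorithm>
#include <cmath>
double pldexp_sketch(double a, double exponent) {
  int e = static_cast<int>(std::min(std::max(exponent, -2099.0), 2099.0));
  int b = e >> 2;                           // floor(e/4)
  double c = std::ldexp(1.0, b);            // 2^b, stand-in for the bit tricks
  double out = a * c * c * c;               // a * 2^(3b)
  return out * std::ldexp(1.0, e - 3 * b);  // a * 2^e
}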
template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
@@ -412,7 +804,7 @@ template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
}
-template<> EIGEN_STRONG_INLINE Packet4f predux_downto4<Packet8f>(const Packet8f& a)
+template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
{
return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}
@@ -456,93 +848,16 @@ template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
+// not needed yet
+// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
+// {
+// return _mm256_movemask_ps(x)==0xFF;
+// }
-template<int Offset>
-struct palign_impl<Offset,Packet8f>
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x)
{
- static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
- {
- if (Offset==1)
- {
- first = _mm256_blend_ps(first, second, 1);
- Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
- Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
- first = _mm256_blend_ps(tmp1, tmp2, 0x88);
- }
- else if (Offset==2)
- {
- first = _mm256_blend_ps(first, second, 3);
- Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
- Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
- first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
- }
- else if (Offset==3)
- {
- first = _mm256_blend_ps(first, second, 7);
- Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
- Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
- first = _mm256_blend_ps(tmp1, tmp2, 0xee);
- }
- else if (Offset==4)
- {
- first = _mm256_blend_ps(first, second, 15);
- Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
- Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
- first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
- }
- else if (Offset==5)
- {
- first = _mm256_blend_ps(first, second, 31);
- first = _mm256_permute2f128_ps(first, first, 1);
- Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
- first = _mm256_permute2f128_ps(tmp, tmp, 1);
- first = _mm256_blend_ps(tmp, first, 0x88);
- }
- else if (Offset==6)
- {
- first = _mm256_blend_ps(first, second, 63);
- first = _mm256_permute2f128_ps(first, first, 1);
- Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
- first = _mm256_permute2f128_ps(tmp, tmp, 1);
- first = _mm256_blend_ps(tmp, first, 0xcc);
- }
- else if (Offset==7)
- {
- first = _mm256_blend_ps(first, second, 127);
- first = _mm256_permute2f128_ps(first, first, 1);
- Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
- first = _mm256_permute2f128_ps(tmp, tmp, 1);
- first = _mm256_blend_ps(tmp, first, 0xee);
- }
- }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet4d>
-{
- static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
- {
- if (Offset==1)
- {
- first = _mm256_blend_pd(first, second, 1);
- __m256d tmp = _mm256_permute_pd(first, 5);
- first = _mm256_permute2f128_pd(tmp, tmp, 1);
- first = _mm256_blend_pd(tmp, first, 0xA);
- }
- else if (Offset==2)
- {
- first = _mm256_blend_pd(first, second, 3);
- first = _mm256_permute2f128_pd(first, first, 1);
- }
- else if (Offset==3)
- {
- first = _mm256_blend_pd(first, second, 7);
- __m256d tmp = _mm256_permute_pd(first, 5);
- first = _mm256_permute2f128_pd(tmp, tmp, 1);
- first = _mm256_blend_pd(tmp, first, 5);
- }
- }
-};
+ return _mm256_movemask_ps(x)!=0;
+}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
@@ -616,24 +931,640 @@ template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, cons
return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
-template<> EIGEN_STRONG_INLINE Packet8f pinsertfirst(const Packet8f& a, float b)
+// Packet math for Eigen::half
+
+template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet8h half; };
+
+template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
+ return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
+ return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
+ return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
+ _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h
+ploaddup<Packet8h>(const Eigen::half* from) {
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
+ const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
+ const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
+ return _mm_set_epi16(d, d, c, c, b, b, a, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h
+ploadquad<Packet8h>(const Eigen::half* from) {
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
+ return _mm_set_epi16(b, b, b, b, a, a, a, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
+ return _mm_cmpeq_epi32(a, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
+ const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm_andnot_si128(sign_mask, a);
+}
+
+EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
+#ifdef EIGEN_HAS_FP16_C
+ return _mm256_cvtph_ps(a);
+#else
+ EIGEN_ALIGN32 Eigen::half aux[8];
+ pstore(aux, a);
+ float f0(aux[0]);
+ float f1(aux[1]);
+ float f2(aux[2]);
+ float f3(aux[3]);
+ float f4(aux[4]);
+ float f5(aux[5]);
+ float f6(aux[6]);
+ float f7(aux[7]);
+
+ return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
+#endif
+}
+
+EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
+#ifdef EIGEN_HAS_FP16_C
+ return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
+#else
+ EIGEN_ALIGN32 float aux[8];
+ pstore(aux, a);
+ const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[0]));
+ const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[1]));
+ const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[2]));
+ const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[3]));
+ const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[4]));
+ const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[5]));
+ const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[6]));
+ const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[7]));
+ return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a,
+ const Packet8h& b) {
+ return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a,
+ const Packet8h& b) {
+ return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
+ return float2half(plset<Packet8f>(static_cast<float>(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) {
+ // Packet8h is a thin wrapper around __m128i, so we can call the integer
+ // intrinsics on it directly, as below:
+ return _mm_or_si128(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) {
+ return _mm_xor_si128(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) {
+ return _mm_and_si128(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) {
+ return _mm_andnot_si128(b,a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
+ return _mm_blendv_epi8(b, a, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
+ return float2half(pround<Packet8f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
+ return float2half(print<Packet8f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
+ return float2half(pceil<Packet8f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
+ return float2half(pfloor<Packet8f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) {
+ return Pack16To8(pcmp_eq(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a,const Packet8h& b) {
+ return Pack16To8(pcmp_le(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a,const Packet8h& b) {
+ return Pack16To8(pcmp_lt(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a,const Packet8h& b) {
+ return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
+ Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm_xor_si128(a, sign_mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = padd(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = psub(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = pmul(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
+ Packet8f af = half2float(a);
+ Packet8f bf = half2float(b);
+ Packet8f rf = pdiv(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
+{
+ const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
+ const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
+ const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
+ const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
+ const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
+ const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
+ const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
+ const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
+ return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
+}
+
+template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
+{
+ EIGEN_ALIGN32 Eigen::half aux[8];
+ pstore(aux, from);
+ to[stride*0] = aux[0];
+ to[stride*1] = aux[1];
+ to[stride*2] = aux[2];
+ to[stride*3] = aux[3];
+ to[stride*4] = aux[4];
+ to[stride*5] = aux[5];
+ to[stride*6] = aux[6];
+ to[stride*7] = aux[7];
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux_max<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux_min<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
+ Packet8f af = half2float(a);
+ float reduced = predux_mul<Packet8f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
+{
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+ return _mm_shuffle_epi8(a,m);
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet8h,8>& kernel) {
+ __m128i a = kernel.packet[0];
+ __m128i b = kernel.packet[1];
+ __m128i c = kernel.packet[2];
+ __m128i d = kernel.packet[3];
+ __m128i e = kernel.packet[4];
+ __m128i f = kernel.packet[5];
+ __m128i g = kernel.packet[6];
+ __m128i h = kernel.packet[7];
+
+ __m128i a03b03 = _mm_unpacklo_epi16(a, b);
+ __m128i c03d03 = _mm_unpacklo_epi16(c, d);
+ __m128i e03f03 = _mm_unpacklo_epi16(e, f);
+ __m128i g03h03 = _mm_unpacklo_epi16(g, h);
+ __m128i a47b47 = _mm_unpackhi_epi16(a, b);
+ __m128i c47d47 = _mm_unpackhi_epi16(c, d);
+ __m128i e47f47 = _mm_unpackhi_epi16(e, f);
+ __m128i g47h47 = _mm_unpackhi_epi16(g, h);
+
+ __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
+ __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
+ __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
+ __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
+ __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
+ __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
+ __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
+ __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
+
+ __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
+ __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
+ __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
+ __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
+ __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
+ __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
+ __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
+ __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
+
+ kernel.packet[0] = a0b0c0d0e0f0g0h0;
+ kernel.packet[1] = a1b1c1d1e1f1g1h1;
+ kernel.packet[2] = a2b2c2d2e2f2g2h2;
+ kernel.packet[3] = a3b3c3d3e3f3g3h3;
+ kernel.packet[4] = a4b4c4d4e4f4g4h4;
+ kernel.packet[5] = a5b5c5d5e5f5g5h5;
+ kernel.packet[6] = a6b6c6d6e6f6g6h6;
+ kernel.packet[7] = a7b7c7d7e7f7g7h7;
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet8h,4>& kernel) {
+ EIGEN_ALIGN32 Eigen::half in[4][8];
+ pstore<Eigen::half>(in[0], kernel.packet[0]);
+ pstore<Eigen::half>(in[1], kernel.packet[1]);
+ pstore<Eigen::half>(in[2], kernel.packet[2]);
+ pstore<Eigen::half>(in[3], kernel.packet[3]);
+
+ EIGEN_ALIGN32 Eigen::half out[4][8];
+
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ out[i][j] = in[j][2*i];
+ }
+ for (int j = 0; j < 4; ++j) {
+ out[i][j+4] = in[j][2*i+1];
+ }
+ }
+
+ kernel.packet[0] = pload<Packet8h>(out[0]);
+ kernel.packet[1] = pload<Packet8h>(out[1]);
+ kernel.packet[2] = pload<Packet8h>(out[2]);
+ kernel.packet[3] = pload<Packet8h>(out[3]);
+}
+
+// BFloat16 implementation.
+
+EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ __m256i extend = _mm256_cvtepu16_epi32(a);
+ return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
+#else
+ __m128i lo = _mm_cvtepu16_epi32(a);
+ __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
+ __m128i lo_shift = _mm_slli_epi32(lo, 16);
+ __m128i hi_shift = _mm_slli_epi32(hi, 16);
+ return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
+#endif
+}
+
+// Convert float to bfloat16 using round-to-nearest-even (denormals round correctly too); NaN inputs are mapped to a quiet NaN.
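+// A scalar sketch of the same rounding (illustrative only; bit_cast here
+// stands for any float<->uint32 reinterpretation helper):
+//   uint32_t input = bit_cast<uint32_t>(f);
+//   uint32_t lsb = (input >> 16) & 1;
+//   input += 0x7fff + lsb;                  // round to nearest, ties to even
+//   uint16_t bf = uint16_t(input >> 16);    // NaN inputs map to 0x7fc0 instead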
+EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
+ Packet8bf r;
+
+ __m256i input = _mm256_castps_si256(a);
+
+#ifdef EIGEN_VECTORIZE_AVX2
+ // uint32_t lsb = (input >> 16);
+ __m256i t = _mm256_srli_epi32(input, 16);
+ // uint32_t lsb = lsb & 1;
+ t = _mm256_and_si256(t, _mm256_set1_epi32(1));
+ // uint32_t rounding_bias = 0x7fff + lsb;
+ t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
+ // input += rounding_bias;
+ t = _mm256_add_epi32(t, input);
+ // input = input >> 16;
+ t = _mm256_srli_epi32(t, 16);
+ // Check for NaN before converting back to bf16
+ __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
+ __m256i nan = _mm256_set1_epi32(0x7fc0);
+ t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
+ // output = numext::bit_cast<uint16_t>(input);
+ return _mm_packus_epi32(_mm256_extractf128_si256(t, 0),
+ _mm256_extractf128_si256(t, 1));
+#else
+ // uint32_t lsb = (input >> 16);
+ __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
+ __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
+ // uint32_t lsb = lsb & 1;
+ lo = _mm_and_si128(lo, _mm_set1_epi32(1));
+ hi = _mm_and_si128(hi, _mm_set1_epi32(1));
+ // uint32_t rounding_bias = 0x7fff + lsb;
+ lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
+ hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
+ // input += rounding_bias;
+ lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
+ hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
+ // input = input >> 16;
+ lo = _mm_srli_epi32(lo, 16);
+ hi = _mm_srli_epi32(hi, 16);
+ // Check for NaN before converting back to bf16
+ __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
+ __m128i nan = _mm_set1_epi32(0x7fc0);
+ lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
+ hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
+ // output = numext::bit_cast<uint16_t>(input);
+ return _mm_packus_epi32(lo, hi);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
+ return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
+ return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
+ return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
+ _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf
+ploaddup<Packet8bf>(const bfloat16* from) {
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
+ const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
+ const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
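+ // _mm_set_epi16 takes its arguments from the high lane down, so this
+ // stores {a,a,b,b,c,c,d,d} in memory order.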
+ return _mm_set_epi16(d, d, c, c, b, b, a, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf
+ploadquad<Packet8bf>(const bfloat16* from) {
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
+ return _mm_set_epi16(b, b, b, b, a, a, a, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
+ return _mm_cmpeq_epi32(a, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
+ const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm_andnot_si128(sign_mask, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a,
+ const Packet8bf& b) {
+ return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a,
+ const Packet8bf& b) {
+ return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
+ return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a,const Packet8bf& b) {
+ return _mm_or_si128(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a,const Packet8bf& b) {
+ return _mm_xor_si128(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a,const Packet8bf& b) {
+ return _mm_and_si128(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a,const Packet8bf& b) {
+ return _mm_andnot_si128(b,a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
+ return _mm_blendv_epi8(b, a, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a)
{
- return _mm256_blend_ps(a,pset1<Packet8f>(b),1);
+ return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
+ return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
+ return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
+ return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a,const Packet8bf& b) {
+ return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a,const Packet8bf& b) {
+ return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a,const Packet8bf& b) {
+ return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a,const Packet8bf& b) {
+ return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
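+ // bfloat16 uses a float-style sign bit, so negation is one XOR with 0x8000.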
+ Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm_xor_si128(a, sign_mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}
-template<> EIGEN_STRONG_INLINE Packet4d pinsertfirst(const Packet4d& a, double b)
+template<> EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride)
{
- return _mm256_blend_pd(a,pset1<Packet4d>(b),1);
+ const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
+ const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
+ const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
+ const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
+ const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
+ const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
+ const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
+ const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
+ return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
}
-template<> EIGEN_STRONG_INLINE Packet8f pinsertlast(const Packet8f& a, float b)
+template<> EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride)
{
- return _mm256_blend_ps(a,pset1<Packet8f>(b),(1<<7));
+ EIGEN_ALIGN32 bfloat16 aux[8];
+ pstore(aux, from);
+ to[stride*0] = aux[0];
+ to[stride*1] = aux[1];
+ to[stride*2] = aux[2];
+ to[stride*3] = aux[3];
+ to[stride*4] = aux[4];
+ to[stride*5] = aux[5];
+ to[stride*6] = aux[6];
+ to[stride*7] = aux[7];
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
}
-template<> EIGEN_STRONG_INLINE Packet4d pinsertlast(const Packet4d& a, double b)
+template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a)
{
- return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+ return _mm_shuffle_epi8(a,m);
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet8bf,8>& kernel) {
+ __m128i a = kernel.packet[0];
+ __m128i b = kernel.packet[1];
+ __m128i c = kernel.packet[2];
+ __m128i d = kernel.packet[3];
+ __m128i e = kernel.packet[4];
+ __m128i f = kernel.packet[5];
+ __m128i g = kernel.packet[6];
+ __m128i h = kernel.packet[7];
+
+ __m128i a03b03 = _mm_unpacklo_epi16(a, b);
+ __m128i c03d03 = _mm_unpacklo_epi16(c, d);
+ __m128i e03f03 = _mm_unpacklo_epi16(e, f);
+ __m128i g03h03 = _mm_unpacklo_epi16(g, h);
+ __m128i a47b47 = _mm_unpackhi_epi16(a, b);
+ __m128i c47d47 = _mm_unpackhi_epi16(c, d);
+ __m128i e47f47 = _mm_unpackhi_epi16(e, f);
+ __m128i g47h47 = _mm_unpackhi_epi16(g, h);
+
+ __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
+ __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
+ __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
+ __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
+ __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
+ __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
+ __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
+ __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
+
+ kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
+ kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
+ kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
+ kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
+ kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
+ kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
+ kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
+ kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet8bf,4>& kernel) {
+ __m128i a = kernel.packet[0];
+ __m128i b = kernel.packet[1];
+ __m128i c = kernel.packet[2];
+ __m128i d = kernel.packet[3];
+
+ __m128i ab_03 = _mm_unpacklo_epi16(a, b);
+ __m128i cd_03 = _mm_unpacklo_epi16(c, d);
+ __m128i ab_47 = _mm_unpackhi_epi16(a, b);
+ __m128i cd_47 = _mm_unpackhi_epi16(c, d);
+
+ kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
+ kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
+ kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
+ kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
}
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/TypeCasting.h
index 83bfdc604..d507fb67b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/TypeCasting.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX/TypeCasting.h
@@ -35,15 +35,79 @@ struct type_casting_traits<int, float> {
};
+#ifndef EIGEN_VECTORIZE_AVX512
+
+template <>
+struct type_casting_traits<Eigen::half, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+
+template <>
+struct type_casting_traits<float, Eigen::half> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<bfloat16, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<float, bfloat16> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+#endif // EIGEN_VECTORIZE_AVX512
template<> EIGEN_STRONG_INLINE Packet8i pcast<Packet8f, Packet8i>(const Packet8f& a) {
- return _mm256_cvtps_epi32(a);
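+ // cvttps truncates toward zero, matching C++ float-to-int conversion
+ // semantics, unlike the round-to-nearest cvtps it replaces.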
+ return _mm256_cvttps_epi32(a);
}
template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8i, Packet8f>(const Packet8i& a) {
return _mm256_cvtepi32_ps(a);
}
+template<> EIGEN_STRONG_INLINE Packet8i preinterpret<Packet8i,Packet8f>(const Packet8f& a) {
+ return _mm256_castps_si256(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f preinterpret<Packet8f,Packet8i>(const Packet8i& a) {
+ return _mm256_castsi256_ps(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {
+ return half2float(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8bf, Packet8f>(const Packet8bf& a) {
+ return Bf16ToF32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {
+ return float2half(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcast<Packet8f, Packet8bf>(const Packet8f& a) {
+ return F32ToBf16(a);
+}
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/Complex.h
new file mode 100644
index 000000000..074253859
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/Complex.h
@@ -0,0 +1,424 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_AVX512_H
+#define EIGEN_COMPLEX_AVX512_H
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- float ----------
+struct Packet8cf
+{
+ EIGEN_STRONG_INLINE Packet8cf() {}
+ EIGEN_STRONG_INLINE explicit Packet8cf(const __m512& a) : v(a) {}
+ __m512 v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+ typedef Packet8cf type;
+ typedef Packet4cf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasSqrt = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet8cf> {
+ typedef std::complex<float> type;
+ typedef Packet4cf half;
+ typedef Packet16f as_real;
+ enum {
+ size = 8,
+ alignment=unpacket_traits<Packet16f>::alignment,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet8cf ptrue<Packet8cf>(const Packet8cf& a) { return Packet8cf(ptrue(Packet16f(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet8cf padd<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_add_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf psub<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_sub_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf pnegate(const Packet8cf& a)
+{
+ return Packet8cf(pnegate(a.v));
+}
+template<> EIGEN_STRONG_INLINE Packet8cf pconj(const Packet8cf& a)
+{
+ const __m512 mask = _mm512_castsi512_ps(_mm512_setr_epi32(
+ 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,
+ 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));
+ return Packet8cf(pxor(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pmul<Packet8cf>(const Packet8cf& a, const Packet8cf& b)
+{
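+ // (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br):
+ // moveldup/movehdup broadcast the real/imaginary parts of a, the permute
+ // swaps each re/im pair of b, and fmaddsub subtracts in even (real)
+ // lanes and adds in odd (imaginary) lanes.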
+ __m512 tmp2 = _mm512_mul_ps(_mm512_movehdup_ps(a.v), _mm512_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));
+ return Packet8cf(_mm512_fmaddsub_ps(_mm512_moveldup_ps(a.v), b.v, tmp2));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pand <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pand(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf por <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(por(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf pxor <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pxor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet8cf pandnot<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pandnot(a.v,b.v)); }
+
+template <>
+EIGEN_STRONG_INLINE Packet8cf pcmp_eq(const Packet8cf& a, const Packet8cf& b) {
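+ // Two complex numbers are equal only if both components match, so AND
+ // each lane's comparison result with its re/im neighbour (the 0xB1
+ // permute swaps the members of every pair).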
+ __m512 eq = pcmp_eq<Packet16f>(a.v, b.v);
+ return Packet8cf(pand(eq, _mm512_permute_ps(eq, 0xB1)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pload <Packet8cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet8cf(pload<Packet16f>(&numext::real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet8cf ploadu<Packet8cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet8cf(ploadu<Packet16f>(&numext::real_ref(*from))); }
+
+
+template<> EIGEN_STRONG_INLINE Packet8cf pset1<Packet8cf>(const std::complex<float>& from)
+{
+ const float re = std::real(from);
+ const float im = std::imag(from);
+ return Packet8cf(_mm512_set_ps(im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf ploaddup<Packet8cf>(const std::complex<float>* from)
+{
+ return Packet8cf( _mm512_castpd_ps( ploaddup<Packet8d>((const double*)(const void*)from )) );
+}
+template<> EIGEN_STRONG_INLINE Packet8cf ploadquad<Packet8cf>(const std::complex<float>* from)
+{
+ return Packet8cf( _mm512_castpd_ps( ploadquad<Packet8d>((const double*)(const void*)from )) );
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet8cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet8cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet8cf pgather<std::complex<float>, Packet8cf>(const std::complex<float>* from, Index stride)
+{
+ return Packet8cf(_mm512_castpd_ps(pgather<double,Packet8d>((const double*)(const void*)from, stride)));
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet8cf>(std::complex<float>* to, const Packet8cf& from, Index stride)
+{
+ pscatter((double*)(void*)to, _mm512_castps_pd(from.v), stride);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet8cf>(const Packet8cf& a)
+{
+ return pfirst(Packet2cf(_mm512_castps512_ps128(a.v)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf preverse(const Packet8cf& a) {
+ return Packet8cf(_mm512_castsi512_ps(
+ _mm512_permutexvar_epi64( _mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7),
+ _mm512_castps_si512(a.v))));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet8cf>(const Packet8cf& a)
+{
+ return predux(padd(Packet4cf(extract256<0>(a.v)),
+ Packet4cf(extract256<1>(a.v))));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet8cf>(const Packet8cf& a)
+{
+ return predux_mul(pmul(Packet4cf(extract256<0>(a.v)),
+ Packet4cf(extract256<1>(a.v))));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4cf predux_half_dowto4<Packet8cf>(const Packet8cf& a) {
+ __m256 lane0 = extract256<0>(a.v);
+ __m256 lane1 = extract256<1>(a.v);
+ __m256 res = _mm256_add_ps(lane0, lane1);
+ return Packet4cf(res);
+}
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet8cf,Packet16f)
+
+template<> EIGEN_STRONG_INLINE Packet8cf pdiv<Packet8cf>(const Packet8cf& a, const Packet8cf& b)
+{
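+ // a/b = a*conj(b) / |b|^2; the 0xB1 shuffle swaps the re/im lanes of
+ // b*b so that denom carries re^2 + im^2 in both members of each pair.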
+ Packet8cf num = pmul(a, pconj(b));
+ __m512 tmp = _mm512_mul_ps(b.v, b.v);
+ __m512 tmp2 = _mm512_shuffle_ps(tmp,tmp,0xB1);
+ __m512 denom = _mm512_add_ps(tmp, tmp2);
+ return Packet8cf(_mm512_div_ps(num.v, denom));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf pcplxflip<Packet8cf>(const Packet8cf& x)
+{
+ return Packet8cf(_mm512_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1)));
+}
+
+//---------- double ----------
+struct Packet4cd
+{
+ EIGEN_STRONG_INLINE Packet4cd() {}
+ EIGEN_STRONG_INLINE explicit Packet4cd(const __m512d& a) : v(a) {}
+ __m512d v;
+};
+
+template<> struct packet_traits<std::complex<double> > : default_packet_traits
+{
+ typedef Packet4cd type;
+ typedef Packet2cd half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 4,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasSqrt = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet4cd> {
+ typedef std::complex<double> type;
+ typedef Packet2cd half;
+ typedef Packet8d as_real;
+ enum {
+ size = 4,
+ alignment = unpacket_traits<Packet8d>::alignment,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet4cd padd<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_add_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd psub<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_sub_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pnegate(const Packet4cd& a) { return Packet4cd(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pconj(const Packet4cd& a)
+{
+ const __m512d mask = _mm512_castsi512_pd(
+ _mm512_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0,
+ 0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));
+ return Packet4cd(pxor(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pmul<Packet4cd>(const Packet4cd& a, const Packet4cd& b)
+{
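+ // Same scheme as the float path: tmp1/tmp2 broadcast the real and
+ // imaginary parts of a, the 0x55 shuffle swaps each re/im pair of b,
+ // and fmaddsub forms (ar*br - ai*bi, ar*bi + ai*br) per pair.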
+ __m512d tmp1 = _mm512_shuffle_pd(a.v,a.v,0x0);
+ __m512d tmp2 = _mm512_shuffle_pd(a.v,a.v,0xFF);
+ __m512d tmp3 = _mm512_shuffle_pd(b.v,b.v,0x55);
+ __m512d odd = _mm512_mul_pd(tmp2, tmp3);
+ return Packet4cd(_mm512_fmaddsub_pd(tmp1, b.v, odd));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd ptrue<Packet4cd>(const Packet4cd& a) { return Packet4cd(ptrue(Packet8d(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet4cd pand <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pand(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd por <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(por(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pxor <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pxor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet4cd pandnot<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pandnot(a.v,b.v)); }
+
+template <>
+EIGEN_STRONG_INLINE Packet4cd pcmp_eq(const Packet4cd& a, const Packet4cd& b) {
+ __m512d eq = pcmp_eq<Packet8d>(a.v, b.v);
+ return Packet4cd(pand(eq, _mm512_permute_pd(eq, 0x55)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pload <Packet4cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet4cd(pload<Packet8d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet4cd ploadu<Packet4cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cd(ploadu<Packet8d>((const double*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4cd pset1<Packet4cd>(const std::complex<double>& from)
+{
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ return Packet4cd(_mm512_broadcast_f64x2(pset1<Packet1cd>(from).v));
+ #else
+ return Packet4cd(_mm512_castps_pd(_mm512_broadcast_f32x4( _mm_castpd_ps(pset1<Packet1cd>(from).v))));
+ #endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd ploaddup<Packet4cd>(const std::complex<double>* from) {
+ return Packet4cd(_mm512_insertf64x4(
+ _mm512_castpd256_pd512(ploaddup<Packet2cd>(from).v), ploaddup<Packet2cd>(from+1).v, 1));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet4cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet4cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet4cd pgather<std::complex<double>, Packet4cd>(const std::complex<double>* from, Index stride)
+{
+ return Packet4cd(_mm512_insertf64x4(_mm512_castpd256_pd512(
+ _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu<Packet1cd>(from+0*stride).v), ploadu<Packet1cd>(from+1*stride).v,1)),
+ _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu<Packet1cd>(from+2*stride).v), ploadu<Packet1cd>(from+3*stride).v,1), 1));
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet4cd>(std::complex<double>* to, const Packet4cd& from, Index stride)
+{
+ __m512i fromi = _mm512_castpd_si512(from.v);
+ double* tod = (double*)(void*)to;
+ _mm_storeu_pd(tod+0*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,0)) );
+ _mm_storeu_pd(tod+2*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,1)) );
+ _mm_storeu_pd(tod+4*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,2)) );
+ _mm_storeu_pd(tod+6*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,3)) );
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet4cd>(const Packet4cd& a)
+{
+ __m128d low = extract128<0>(a.v);
+ EIGEN_ALIGN16 double res[2];
+ _mm_store_pd(res, low);
+ return std::complex<double>(res[0],res[1]);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd preverse(const Packet4cd& a) {
+ return Packet4cd(_mm512_shuffle_f64x2(a.v, a.v, (shuffle_mask<3,2,1,0>::mask)));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet4cd>(const Packet4cd& a)
+{
+ return predux(padd(Packet2cd(_mm512_extractf64x4_pd(a.v,0)),
+ Packet2cd(_mm512_extractf64x4_pd(a.v,1))));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet4cd>(const Packet4cd& a)
+{
+ return predux_mul(pmul(Packet2cd(_mm512_extractf64x4_pd(a.v,0)),
+ Packet2cd(_mm512_extractf64x4_pd(a.v,1))));
+}
+
+template<> struct conj_helper<Packet4cd, Packet4cd, false,true>
+{
+ EIGEN_STRONG_INLINE Packet4cd pmadd(const Packet4cd& x, const Packet4cd& y, const Packet4cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) const
+ {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template<> struct conj_helper<Packet4cd, Packet4cd, true,false>
+{
+ EIGEN_STRONG_INLINE Packet4cd pmadd(const Packet4cd& x, const Packet4cd& y, const Packet4cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) const
+ {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template<> struct conj_helper<Packet4cd, Packet4cd, true,true>
+{
+ EIGEN_STRONG_INLINE Packet4cd pmadd(const Packet4cd& x, const Packet4cd& y, const Packet4cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) const
+ {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cd,Packet8d)
+
+template<> EIGEN_STRONG_INLINE Packet4cd pdiv<Packet4cd>(const Packet4cd& a, const Packet4cd& b)
+{
+ Packet4cd num = pmul(a, pconj(b));
+ __m512d tmp = _mm512_mul_pd(b.v, b.v);
+ __m512d denom = padd(_mm512_permute_pd(tmp,0x55), tmp);
+ return Packet4cd(_mm512_div_pd(num.v, denom));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd pcplxflip<Packet4cd>(const Packet4cd& x)
+{
+ return Packet4cd(_mm512_permute_pd(x.v,0x55));
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8cf,4>& kernel) {
+ PacketBlock<Packet8d,4> pb;
+
+ pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v);
+ pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v);
+ pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v);
+ pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v);
+ ptranspose(pb);
+ kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]);
+ kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]);
+ kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]);
+ kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8cf,8>& kernel) {
+ PacketBlock<Packet8d,8> pb;
+
+ pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v);
+ pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v);
+ pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v);
+ pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v);
+ pb.packet[4] = _mm512_castps_pd(kernel.packet[4].v);
+ pb.packet[5] = _mm512_castps_pd(kernel.packet[5].v);
+ pb.packet[6] = _mm512_castps_pd(kernel.packet[6].v);
+ pb.packet[7] = _mm512_castps_pd(kernel.packet[7].v);
+ ptranspose(pb);
+ kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]);
+ kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]);
+ kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]);
+ kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]);
+ kernel.packet[4].v = _mm512_castpd_ps(pb.packet[4]);
+ kernel.packet[5].v = _mm512_castpd_ps(pb.packet[5]);
+ kernel.packet[6].v = _mm512_castpd_ps(pb.packet[6]);
+ kernel.packet[7].v = _mm512_castpd_ps(pb.packet[7]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4cd,4>& kernel) {
+ __m512d T0 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, (shuffle_mask<0,1,0,1>::mask)); // [a0 a1 b0 b1]
+ __m512d T1 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, (shuffle_mask<2,3,2,3>::mask)); // [a2 a3 b2 b3]
+ __m512d T2 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, (shuffle_mask<0,1,0,1>::mask)); // [c0 c1 d0 d1]
+ __m512d T3 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, (shuffle_mask<2,3,2,3>::mask)); // [c2 c3 d2 d3]
+
+ kernel.packet[3] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, (shuffle_mask<1,3,1,3>::mask))); // [a3 b3 c3 d3]
+ kernel.packet[2] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, (shuffle_mask<0,2,0,2>::mask))); // [a2 b2 c2 d2]
+ kernel.packet[1] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, (shuffle_mask<1,3,1,3>::mask))); // [a1 b1 c1 d1]
+ kernel.packet[0] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, (shuffle_mask<0,2,0,2>::mask))); // [a0 b0 c0 d0]
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cd psqrt<Packet4cd>(const Packet4cd& a) {
+ return psqrt_complex<Packet4cd>(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8cf psqrt<Packet8cf>(const Packet8cf& a) {
+ return psqrt_complex<Packet8cf>(a);
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_AVX512_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/MathFunctions.h
index 399be0ee4..6fd726d29 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/MathFunctions.h
@@ -15,13 +15,13 @@ namespace Eigen {
namespace internal {
// Disable the code for older versions of gcc that don't support many of the required avx512 intrinsics.
-#if EIGEN_GNUC_AT_LEAST(5, 3)
+#if EIGEN_GNUC_AT_LEAST(5, 3) || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC >= 1923
#define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \
const Packet16f p16f_##NAME = pset1<Packet16f>(X)
#define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \
- const Packet16f p16f_##NAME = (__m512)pset1<Packet16i>(X)
+ const Packet16f p16f_##NAME = preinterpret<Packet16f,Packet16i>(pset1<Packet16i>(X))
#define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \
const Packet8d p8d_##NAME = pset1<Packet8d>(X)
@@ -29,100 +29,41 @@ namespace internal {
#define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \
const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X))
-// Natural logarithm
-// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)
-// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
-// be easily approximated by a polynomial centered on m=1 for stability.
-#if defined(EIGEN_VECTORIZE_AVX512DQ)
+#define _EIGEN_DECLARE_CONST_Packet16bf(NAME, X) \
+ const Packet16bf p16bf_##NAME = pset1<Packet16bf>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet16bf_FROM_INT(NAME, X) \
+ const Packet16bf p16bf_##NAME = preinterpret<Packet16bf,Packet16i>(pset1<Packet16i>(X))
+
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
plog<Packet16f>(const Packet16f& _x) {
- Packet16f x = _x;
- _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
- _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet16f(126f, 126.0f);
-
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
- // The smallest non denormalized float number.
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000);
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000);
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
-
- // Polynomial coefficients.
- _EIGEN_DECLARE_CONST_Packet16f(cephes_SQRTHF, 0.707106781186547524f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p0, 7.0376836292E-2f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p1, -1.1514610310E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p2, 1.1676998740E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p3, -1.2420140846E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p4, +1.4249322787E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p5, -1.6668057665E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p6, +2.0000714765E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p7, -2.4999993993E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p8, +3.3333331174E-1f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q1, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q2, 0.693359375f);
-
- // invalid_mask is set to true when x is NaN
- __mmask16 invalid_mask =
- _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);
- __mmask16 iszero_mask =
- _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ);
-
- // Truncate input values to the minimum positive normal.
- x = pmax(x, p16f_min_norm_pos);
-
- // Extract the shifted exponents.
- Packet16f emm0 = _mm512_cvtepi32_ps(_mm512_srli_epi32((__m512i)x, 23));
- Packet16f e = _mm512_sub_ps(emm0, p16f_126f);
-
- // Set the exponents to -1, i.e. x are in the range [0.5,1).
- x = _mm512_and_ps(x, p16f_inv_mant_mask);
- x = _mm512_or_ps(x, p16f_half);
-
- // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
- // and shift by -1. The values are then centered around 0, which improves
- // the stability of the polynomial evaluation.
- // if( x < SQRTHF ) {
- // e -= 1;
- // x = x + x - 1.0;
- // } else { x = x - 1.0; }
- __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);
- Packet16f tmp = _mm512_mask_blend_ps(mask, x, _mm512_setzero_ps());
- x = psub(x, p16f_1);
- e = psub(e, _mm512_mask_blend_ps(mask, p16f_1, _mm512_setzero_ps()));
- x = padd(x, tmp);
-
- Packet16f x2 = pmul(x, x);
- Packet16f x3 = pmul(x2, x);
-
- // Evaluate the polynomial approximant of degree 8 in three parts, probably
- // to improve instruction-level parallelism.
- Packet16f y, y1, y2;
- y = pmadd(p16f_cephes_log_p0, x, p16f_cephes_log_p1);
- y1 = pmadd(p16f_cephes_log_p3, x, p16f_cephes_log_p4);
- y2 = pmadd(p16f_cephes_log_p6, x, p16f_cephes_log_p7);
- y = pmadd(y, x, p16f_cephes_log_p2);
- y1 = pmadd(y1, x, p16f_cephes_log_p5);
- y2 = pmadd(y2, x, p16f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- // Add the logarithm of the exponent back to the result of the interpolation.
- y1 = pmul(e, p16f_cephes_log_q1);
- tmp = pmul(x2, p16f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p16f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
-
- // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
- return _mm512_mask_blend_ps(iszero_mask, p16f_minus_inf,
- _mm512_mask_blend_ps(invalid_mask, p16f_nan, x));
+ return plog_float(_x);
}
-#endif
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
+plog<Packet8d>(const Packet8d& _x) {
+ return plog_double(_x);
+}
+
+F16_PACKET_FUNCTION(Packet16f, Packet16h, plog)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog)
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+plog2<Packet16f>(const Packet16f& _x) {
+ return plog2_float(_x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
+plog2<Packet8d>(const Packet8d& _x) {
+ return plog2_double(_x);
+}
+
+F16_PACKET_FUNCTION(Packet16f, Packet16h, plog2)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog2)
// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
@@ -158,17 +99,17 @@ pexp<Packet16f>(const Packet16f& _x) {
_EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f);
Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x);
Packet16f r2 = pmul(r, r);
+ Packet16f r3 = pmul(r2, r);
- // TODO(gonnet): Split into odd/even polynomials and try to exploit
- // instruction-level parallelism.
- Packet16f y = p16f_cephes_exp_p0;
- y = pmadd(y, r, p16f_cephes_exp_p1);
- y = pmadd(y, r, p16f_cephes_exp_p2);
- y = pmadd(y, r, p16f_cephes_exp_p3);
- y = pmadd(y, r, p16f_cephes_exp_p4);
- y = pmadd(y, r, p16f_cephes_exp_p5);
- y = pmadd(y, r2, r);
- y = padd(y, p16f_1);
+ // Evaluate the polynomial approximant in three parts to improve instruction-level parallelism.
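+ // exp(r) ~ p0*r^7 + p1*r^6 + p2*r^5 + p3*r^4 + p4*r^3 + p5*r^2 + r + 1,
+ // evaluated as ((p0*r^2 + p1*r + p2)*r^3 + (p3*r^2 + p4*r + p5))*r^2 + (r + 1)
+ // so the three sub-polynomials can be computed independently.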
+ Packet16f y, y1, y2;
+ y = pmadd(p16f_cephes_exp_p0, r, p16f_cephes_exp_p1);
+ y1 = pmadd(p16f_cephes_exp_p3, r, p16f_cephes_exp_p4);
+ y2 = padd(r, p16f_1);
+ y = pmadd(y, r, p16f_cephes_exp_p2);
+ y1 = pmadd(y1, r, p16f_cephes_exp_p5);
+ y = pmadd(y, r3, y1);
+ y = pmadd(y, r2, y2);
// Build emm0 = 2^m.
Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127));
@@ -178,74 +119,40 @@ pexp<Packet16f>(const Packet16f& _x) {
return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x);
}
-/*template <>
+template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
pexp<Packet8d>(const Packet8d& _x) {
- Packet8d x = _x;
-
- _EIGEN_DECLARE_CONST_Packet8d(1, 1.0);
- _EIGEN_DECLARE_CONST_Packet8d(2, 2.0);
-
- _EIGEN_DECLARE_CONST_Packet8d(exp_hi, 709.437);
- _EIGEN_DECLARE_CONST_Packet8d(exp_lo, -709.436139303);
-
- _EIGEN_DECLARE_CONST_Packet8d(cephes_LOG2EF, 1.4426950408889634073599);
-
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p0, 1.26177193074810590878e-4);
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p1, 3.02994407707441961300e-2);
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p2, 9.99999999999999999910e-1);
-
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q0, 3.00198505138664455042e-6);
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q1, 2.52448340349684104192e-3);
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q2, 2.27265548208155028766e-1);
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q3, 2.00000000000000000009e0);
-
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C1, 0.693145751953125);
- _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C2, 1.42860682030941723212e-6);
-
- // clamp x
- x = pmax(pmin(x, p8d_exp_hi), p8d_exp_lo);
-
- // Express exp(x) as exp(g + n*log(2)).
- const Packet8d n =
- _mm512_mul_round_pd(p8d_cephes_LOG2EF, x, _MM_FROUND_TO_NEAREST_INT);
-
- // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
- // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
- // digits right.
- const Packet8d nC1 = pmul(n, p8d_cephes_exp_C1);
- const Packet8d nC2 = pmul(n, p8d_cephes_exp_C2);
- x = psub(x, nC1);
- x = psub(x, nC2);
-
- const Packet8d x2 = pmul(x, x);
+ return pexp_double(_x);
+}
- // Evaluate the numerator polynomial of the rational interpolant.
- Packet8d px = p8d_cephes_exp_p0;
- px = pmadd(px, x2, p8d_cephes_exp_p1);
- px = pmadd(px, x2, p8d_cephes_exp_p2);
- px = pmul(px, x);
+F16_PACKET_FUNCTION(Packet16f, Packet16h, pexp)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pexp)
- // Evaluate the denominator polynomial of the rational interpolant.
- Packet8d qx = p8d_cephes_exp_q0;
- qx = pmadd(qx, x2, p8d_cephes_exp_q1);
- qx = pmadd(qx, x2, p8d_cephes_exp_q2);
- qx = pmadd(qx, x2, p8d_cephes_exp_q3);
+template <>
+EIGEN_STRONG_INLINE Packet16h pfrexp(const Packet16h& a, Packet16h& exponent) {
+ Packet16f fexponent;
+ const Packet16h out = float2half(pfrexp<Packet16f>(half2float(a), fexponent));
+ exponent = float2half(fexponent);
+ return out;
+}
- // I don't really get this bit, copied from the SSE2 routines, so...
- // TODO(gonnet): Figure out what is going on here, perhaps find a better
- // rational interpolant?
- x = _mm512_div_pd(px, psub(qx, px));
- x = pmadd(p8d_2, x, p8d_1);
+template <>
+EIGEN_STRONG_INLINE Packet16h pldexp(const Packet16h& a, const Packet16h& exponent) {
+ return float2half(pldexp<Packet16f>(half2float(a), half2float(exponent)));
+}
- // Build e=2^n.
- const Packet8d e = _mm512_castsi512_pd(_mm512_slli_epi64(
- _mm512_add_epi64(_mm512_cvtpd_epi64(n), _mm512_set1_epi64(1023)), 52));
+template <>
+EIGEN_STRONG_INLINE Packet16bf pfrexp(const Packet16bf& a, Packet16bf& exponent) {
+ Packet16f fexponent;
+ const Packet16bf out = F32ToBf16(pfrexp<Packet16f>(Bf16ToF32(a), fexponent));
+ exponent = F32ToBf16(fexponent);
+ return out;
+}
- // Construct the result 2^n * exp(g) = e * x. The max is used to catch
- // non-finite values in the input.
- return pmax(pmul(x, e), _x);
- }*/
+template <>
+EIGEN_STRONG_INLINE Packet16bf pldexp(const Packet16bf& a, const Packet16bf& exponent) {
+ return F32ToBf16(pldexp<Packet16f>(Bf16ToF32(a), Bf16ToF32(exponent)));
+}
// Functions for sqrt.
// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
@@ -257,138 +164,197 @@ pexp<Packet8d>(const Packet8d& _x) {
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
psqrt<Packet16f>(const Packet16f& _x) {
- _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
- _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);
-
- Packet16f neg_half = pmul(_x, p16f_minus_half);
+ Packet16f neg_half = pmul(_x, pset1<Packet16f>(-.5f));
+ __mmask16 denormal_mask = _mm512_kand(
+ _mm512_cmp_ps_mask(_x, pset1<Packet16f>((std::numeric_limits<float>::min)()),
+ _CMP_LT_OQ),
+ _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_GE_OQ));
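+ // The mask flags arguments in [0, min_normal), i.e. zero and denormals,
+ // whose results are flushed to zero in the final blend below.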
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);
- Packet16f x = _mm512_mask_blend_ps(non_zero_mask, _mm512_rsqrt14_ps(_x),
- _mm512_setzero_ps());
+ Packet16f x = _mm512_rsqrt14_ps(_x);
// Do a single step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet16f>(1.5f)));
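+ // i.e. x <- x*(1.5 - 0.5*_x*x^2), which converges quadratically to rsqrt(_x).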
- // Multiply the original _x by it's reciprocal square root to extract the
- // square root.
- return pmul(_x, x);
+ // Flush results for denormals to zero.
+ return _mm512_mask_blend_ps(denormal_mask, pmul(_x,x), _mm512_setzero_ps());
}
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
psqrt<Packet8d>(const Packet8d& _x) {
- _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
- _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
- _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);
-
- Packet8d neg_half = pmul(_x, p8d_minus_half);
+ Packet8d neg_half = pmul(_x, pset1<Packet8d>(-.5));
+ __mmask16 denormal_mask = _mm512_kand(
+ _mm512_cmp_pd_mask(_x, pset1<Packet8d>((std::numeric_limits<double>::min)()),
+ _CMP_LT_OQ),
+ _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_GE_OQ));
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- __mmask8 non_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_GE_OQ);
- Packet8d x = _mm512_mask_blend_pd(non_zero_mask, _mm512_rsqrt14_pd(_x),
- _mm512_setzero_pd());
+ Packet8d x = _mm512_rsqrt14_pd(_x);
- // Do a first step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
+ // Do a single step of Newton's iteration.
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5)));
// Do a second step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5)));
- // Multiply the original _x by it's reciprocal square root to extract the
- // square root.
- return pmul(_x, x);
+ return _mm512_mask_blend_pd(denormal_mask, pmul(_x,x), _mm512_setzero_pd());
}
#else
template <>
EIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) {
return _mm512_sqrt_ps(x);
}
+
template <>
EIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) {
return _mm512_sqrt_pd(x);
}
#endif
-// Functions for rsqrt.
-// Almost identical to the sqrt routine, just leave out the last multiplication
-// and fill in NaN/Inf where needed. Note that this function only exists as an
-// iterative version for doubles since there is no instruction for diretly
-// computing the reciprocal square root in AVX-512.
-#ifdef EIGEN_FAST_MATH
+F16_PACKET_FUNCTION(Packet16f, Packet16h, psqrt)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, psqrt)
+
+// prsqrt for float.
+#if defined(EIGEN_VECTORIZE_AVX512ER)
+
+template <>
+EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
+ return _mm512_rsqrt28_ps(x);
+}
+#elif EIGEN_FAST_MATH
+
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
prsqrt<Packet16f>(const Packet16f& _x) {
_EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000);
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
_EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
_EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
- _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);
Packet16f neg_half = pmul(_x, p16f_minus_half);
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
- Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(),
- _mm512_rsqrt14_ps(_x));
-
- // Fill in NaNs and Infs for the negative/zero entries.
- __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
- Packet16f infs_and_nans = _mm512_mask_blend_ps(
- neg_mask, p16f_nan,
- _mm512_mask_blend_ps(le_zero_mask, p16f_inf, _mm512_setzero_ps()));
-
- // Do a single step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));
+ // Identify infinite, negative and denormal arguments.
+ __mmask16 inf_mask = _mm512_cmp_ps_mask(_x, p16f_inf, _CMP_EQ_OQ);
+ __mmask16 not_pos_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LE_OQ);
+ __mmask16 not_finite_pos_mask = not_pos_mask | inf_mask;
+
+ // Compute an approximate result using the rsqrt intrinsic, forcing +inf
+ // for denormals for consistency with AVX and SSE implementations.
+ Packet16f y_approx = _mm512_rsqrt14_ps(_x);
+
+ // Do a single step of Newton-Raphson iteration to improve the approximation.
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
+ // It is essential to evaluate the inner term like this because forming
+ // y_n^2 may over- or underflow.
+ Packet16f y_newton = pmul(y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p16f_one_point_five));
+
+ // Select the result of the Newton-Raphson step for positive finite arguments.
+ // For other arguments, choose the output of the intrinsic. This will
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(0) = +inf.
+ return _mm512_mask_blend_ps(not_finite_pos_mask, y_newton, y_approx);
+}
+#else
- // Insert NaNs and Infs in all the right places.
- return _mm512_mask_blend_ps(le_zero_mask, infs_and_nans, x);
+template <>
+EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
+ _EIGEN_DECLARE_CONST_Packet16f(one, 1.0f);
+ return _mm512_div_ps(p16f_one, _mm512_sqrt_ps(x));
}
+#endif
+
+F16_PACKET_FUNCTION(Packet16f, Packet16h, prsqrt)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, prsqrt)
+// prsqrt for double.
+#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
prsqrt<Packet8d>(const Packet8d& _x) {
- _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL);
- _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(nan, 0x7ff1000000000000LL);
_EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
_EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
- _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);
+ _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL);
Packet8d neg_half = pmul(_x, p8d_minus_half);
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- __mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ);
- Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(),
- _mm512_rsqrt14_pd(_x));
+ // Identify infinite, negative and denormal arguments.
+ __mmask8 inf_mask = _mm512_cmp_pd_mask(_x, p8d_inf, _CMP_EQ_OQ);
+ __mmask8 not_pos_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LE_OQ);
+ __mmask8 not_finite_pos_mask = not_pos_mask | inf_mask;
- // Fill in NaNs and Infs for the negative/zero entries.
- __mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ);
- Packet8d infs_and_nans = _mm512_mask_blend_pd(
- neg_mask, p8d_nan,
- _mm512_mask_blend_pd(le_zero_mask, p8d_inf, _mm512_setzero_pd()));
-
- // Do a first step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
-
- // Do a second step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));
-
- // Insert NaNs and Infs in all the right places.
- return _mm512_mask_blend_pd(le_zero_mask, infs_and_nans, x);
+ // Compute an approximate result using the rsqrt intrinsic, forcing +inf
+ // for denormals for consistency with AVX and SSE implementations.
+#if defined(EIGEN_VECTORIZE_AVX512ER)
+ Packet8d y_approx = _mm512_rsqrt28_pd(_x);
+#else
+ Packet8d y_approx = _mm512_rsqrt14_pd(_x);
+#endif
+ // Do one or two steps of Newton-Raphson iteration to improve the approximation, depending on the
+ // starting accuracy (either 2^-14 or 2^-28, depending on whether AVX512ER is available).
+ // The Newton-Raphson algorithm has quadratic convergence and roughly doubles the number
+ // of correct digits for each step.
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
+ // It is essential to evaluate the inner term like this because forming
+ // y_n^2 may over- or underflow.
+ Packet8d y_newton = pmul(y_approx, pmadd(neg_half, pmul(y_approx, y_approx), p8d_one_point_five));
+#if !defined(EIGEN_VECTORIZE_AVX512ER)
+ y_newton = pmul(y_newton, pmadd(y_newton, pmul(neg_half, y_newton), p8d_one_point_five));
+#endif
+ // Select the result of the Newton-Raphson step for positive finite arguments.
+ // For other arguments, choose the output of the intrinsic. This will
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(0) = +inf.
+ return _mm512_mask_blend_pd(not_finite_pos_mask, y_newton, y_approx);
}
#else
template <>
-EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
- return _mm512_rsqrt28_ps(x);
+EIGEN_STRONG_INLINE Packet8d prsqrt<Packet8d>(const Packet8d& x) {
+ _EIGEN_DECLARE_CONST_Packet8d(one, 1.0);
+ return _mm512_div_pd(p8d_one, _mm512_sqrt_pd(x));
}
#endif
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet16f plog1p<Packet16f>(const Packet16f& _x) {
+ return generic_plog1p(_x);
+}
+
+F16_PACKET_FUNCTION(Packet16f, Packet16h, plog1p)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog1p)
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet16f pexpm1<Packet16f>(const Packet16f& _x) {
+ return generic_expm1(_x);
+}
+
+F16_PACKET_FUNCTION(Packet16f, Packet16h, pexpm1)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pexpm1)
+
#endif
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+psin<Packet16f>(const Packet16f& _x) {
+ return psin_float(_x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+pcos<Packet16f>(const Packet16f& _x) {
+ return pcos_float(_x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
+ptanh<Packet16f>(const Packet16f& _x) {
+ return internal::generic_fast_tanh_float(_x);
+}
+
+F16_PACKET_FUNCTION(Packet16f, Packet16h, psin)
+F16_PACKET_FUNCTION(Packet16f, Packet16h, pcos)
+F16_PACKET_FUNCTION(Packet16f, Packet16h, ptanh)
+
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, psin)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pcos)
+BF16_PACKET_FUNCTION(Packet16f, Packet16bf, ptanh)
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/PacketMath.h
index 12b897572..34d49ab66 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/PacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/PacketMath.h
@@ -19,10 +19,10 @@ namespace internal {
#endif
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#endif
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
@@ -31,6 +31,8 @@ namespace internal {
typedef __m512 Packet16f;
typedef __m512i Packet16i;
typedef __m512d Packet8d;
+typedef eigen_packet_wrapper<__m256i, 1> Packet16h;
+typedef eigen_packet_wrapper<__m256i, 2> Packet16bf;
template <>
struct is_arithmetic<__m512> {
@@ -45,6 +47,51 @@ struct is_arithmetic<__m512d> {
enum { value = true };
};
+template<> struct is_arithmetic<Packet16h> { enum { value = true }; };
+
+template <>
+struct packet_traits<half> : default_packet_traits {
+ typedef Packet16h type;
+ // There is no half-size packet for Packet16h.
+ typedef Packet16h half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 16,
+ HasHalfPacket = 1,
+
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasAbs2 = 0,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasLog = 1,
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasBlend = 0,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+ HasBessel = 1,
+ HasNdtri = 1
+ };
+};
+
template<> struct packet_traits<float> : default_packet_traits
{
typedef Packet16f type;
@@ -54,15 +101,32 @@ template<> struct packet_traits<float> : default_packet_traits
AlignedOnScalar = 1,
size = 16,
HasHalfPacket = 1,
-#if EIGEN_GNUC_AT_LEAST(5, 3)
-#ifdef EIGEN_VECTORIZE_AVX512DQ
+
+ HasAbs = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasBlend = 0,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+#if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
HasLog = 1,
-#endif
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
HasExp = 1,
HasSqrt = EIGEN_FAST_MATH,
HasRsqrt = EIGEN_FAST_MATH,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
#endif
- HasDiv = 1
+ HasCmp = 1,
+ HasDiv = 1,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1
};
};
template<> struct packet_traits<double> : default_packet_traits
@@ -74,11 +138,18 @@ template<> struct packet_traits<double> : default_packet_traits
AlignedOnScalar = 1,
size = 8,
HasHalfPacket = 1,
-#if EIGEN_GNUC_AT_LEAST(5, 3)
+#if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
+ HasLog = 1,
+ HasExp = 1,
HasSqrt = EIGEN_FAST_MATH,
HasRsqrt = EIGEN_FAST_MATH,
#endif
- HasDiv = 1
+ HasCmp = 1,
+ HasDiv = 1,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1
};
};
@@ -98,19 +169,28 @@ template <>
struct unpacket_traits<Packet16f> {
typedef float type;
typedef Packet8f half;
- enum { size = 16, alignment=Aligned64 };
+ typedef Packet16i integer_packet;
+ typedef uint16_t mask_t;
+ enum { size = 16, alignment=Aligned64, vectorizable=true, masked_load_available=true, masked_store_available=true };
};
template <>
struct unpacket_traits<Packet8d> {
typedef double type;
typedef Packet4d half;
- enum { size = 8, alignment=Aligned64 };
+ enum { size = 8, alignment=Aligned64, vectorizable=true, masked_load_available=false, masked_store_available=false };
};
template <>
struct unpacket_traits<Packet16i> {
typedef int type;
typedef Packet8i half;
- enum { size = 16, alignment=Aligned64 };
+ enum { size = 16, alignment=Aligned64, vectorizable=false, masked_load_available=false, masked_store_available=false };
+};
+
+template<>
+struct unpacket_traits<Packet16h> {
+ typedef Eigen::half type;
+ typedef Packet8h half;
+ enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template <>
@@ -127,12 +207,39 @@ EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
}
template <>
+EIGEN_STRONG_INLINE Packet16f pset1frombits<Packet16f>(unsigned int from) {
+ return _mm512_castsi512_ps(_mm512_set1_epi32(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8d pset1frombits<Packet8d>(const numext::uint64_t from) {
+ return _mm512_castsi512_pd(_mm512_set1_epi64(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pzero(const Packet16f& /*a*/) { return _mm512_setzero_ps(); }
+template<> EIGEN_STRONG_INLINE Packet8d pzero(const Packet8d& /*a*/) { return _mm512_setzero_pd(); }
+template<> EIGEN_STRONG_INLINE Packet16i pzero(const Packet16i& /*a*/) { return _mm512_setzero_si512(); }
+
+template<> EIGEN_STRONG_INLINE Packet16f peven_mask(const Packet16f& /*a*/) {
+ return _mm512_castsi512_ps(_mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
+ 0, -1, 0, -1, 0, -1, 0, -1));
+}
+template<> EIGEN_STRONG_INLINE Packet16i peven_mask(const Packet16i& /*a*/) {
+ return _mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
+ 0, -1, 0, -1, 0, -1, 0, -1);
+}
+template<> EIGEN_STRONG_INLINE Packet8d peven_mask(const Packet8d& /*a*/) {
+ return _mm512_castsi512_pd(_mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1,
+ 0, 0, -1, -1, 0, 0, -1, -1));
+}
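+// peven_mask returns all-ones bits in the even-indexed lanes and zeros in
+// the odd ones; _mm512_set_epi32 lists its arguments from lane 15 down to
+// lane 0, which is why the -1 entries come second in each pair above.
+// Typical use (sketch, 'v' hypothetical): pand(v, peven_mask(v)) keeps the
+// even lanes of 'v' and zeroes the odd ones.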
+
+template <>
EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
return _mm512_broadcastss_ps(_mm_load_ps1(from));
}
template <>
EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
- return _mm512_broadcastsd_pd(_mm_load_pd1(from));
+ return _mm512_set1_pd(*from);
}
template <>
@@ -158,6 +265,11 @@ EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a,
const Packet8d& b) {
return _mm512_add_pd(a, b);
}
+template <>
+EIGEN_STRONG_INLINE Packet16i padd<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_add_epi32(a, b);
+}
template <>
EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a,
@@ -169,6 +281,11 @@ EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a,
const Packet8d& b) {
return _mm512_sub_pd(a, b);
}
+template <>
+EIGEN_STRONG_INLINE Packet16i psub<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_sub_epi32(a, b);
+}
template <>
EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
@@ -202,6 +319,11 @@ EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,
const Packet8d& b) {
return _mm512_mul_pd(a, b);
}
+template <>
+EIGEN_STRONG_INLINE Packet16i pmul<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_mullo_epi32(a, b);
+}
template <>
EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,
@@ -214,7 +336,7 @@ EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,
return _mm512_div_pd(a, b);
}
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template <>
EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,
const Packet16f& c) {
@@ -228,6 +350,24 @@ EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b,
#endif
template <>
+EIGEN_DEVICE_FUNC inline Packet16f pselect(const Packet16f& mask,
+ const Packet16f& a,
+ const Packet16f& b) {
+ __mmask16 mask16 = _mm512_cmp_epi32_mask(
+ _mm512_castps_si512(mask), _mm512_setzero_epi32(), _MM_CMPINT_EQ);
+ return _mm512_mask_blend_ps(mask16, a, b);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet8d pselect(const Packet8d& mask,
+ const Packet8d& a,
+ const Packet8d& b) {
+ __mmask8 mask8 = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask),
+ _mm512_setzero_epi32(), _MM_CMPINT_EQ);
+ return _mm512_mask_blend_pd(mask8, a, b);
+}
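+// Note on the convention: pselect(mask, a, b) takes 'a' in lanes where
+// 'mask' is all-ones and 'b' where it is all-zeros. The compare against zero
+// converts the bitmask packet into the __mmask16/__mmask8 form expected by
+// the AVX-512 blend intrinsics.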
+
+template <>
EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a,
const Packet16f& b) {
// Arguments are reversed to match NaN propagation behavior of std::min.
@@ -253,30 +393,173 @@ EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,
return _mm512_max_pd(b, a);
}
-template <>
-EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+// Add specializations for min/max with prescribed NaN propagation.
+template<>
+EIGEN_STRONG_INLINE Packet16f pmin<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
+ return pminmax_propagate_numbers(a, b, pmin<Packet16f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8d pmin<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
+ return pminmax_propagate_numbers(a, b, pmin<Packet8d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet16f pmax<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
+ return pminmax_propagate_numbers(a, b, pmax<Packet16f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8d pmax<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
+ return pminmax_propagate_numbers(a, b, pmax<Packet8d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet16f pmin<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
+ return pminmax_propagate_nan(a, b, pmin<Packet16f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8d pmin<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
+ return pminmax_propagate_nan(a, b, pmin<Packet8d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet16f pmax<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
+ return pminmax_propagate_nan(a, b, pmax<Packet16f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8d pmax<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
+ return pminmax_propagate_nan(a, b, pmax<Packet8d>);
+}
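+// The pminmax_propagate_* helpers live in GenericPacketMath.h and build the
+// NaN policy out of pcmp_eq and pselect; roughly (sketch):
+//
+//   Packet not_nan_a = pcmp_eq(a, a);  // all-ones where a is not NaN
+//   Packet not_nan_b = pcmp_eq(b, b);
+//   // PropagateNumbers: fall back to the non-NaN operand
+//   return pselect(not_nan_a, pselect(not_nan_b, op(a, b), a), b);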
+
+
#ifdef EIGEN_VECTORIZE_AVX512DQ
- return _mm512_and_ps(a, b);
+template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { return _mm512_extractf32x8_ps(x,I_); }
+template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { return _mm512_extractf64x2_pd(x,I_); }
+EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { return _mm512_insertf32x8(_mm512_castps256_ps512(a),b,1); }
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane0_a, lane0_b), 0);
+// AVX512F does not define _mm512_extractf32x8_ps to extract __m256 from __m512
+template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
+ return _mm256_castsi256_ps(_mm512_extracti64x4_epi64( _mm512_castps_si512(x),I_));
+}
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane1_a, lane1_b), 1);
+// AVX512F does not define _mm512_extractf64x2_pd to extract __m128 from __m512
+template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
+ return _mm_castsi128_pd(_mm512_extracti32x4_epi32( _mm512_castpd_si512(x),I_));
+}
+
+EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
+ return _mm512_castsi512_ps(_mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)),
+ _mm256_castps_si256(b),1));
+}
+#endif
+
+// Helper function for the bit-packing step of the low-precision comparisons.
+// It packs 16 lanes of 32-bit flags into 16 lanes of 16-bit flags.
+EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf) {
+ // Split the data into 128-bit pieces and pack them with AVX instructions
+ // so that the element order of the vector is preserved.
+ // Operation:
+ // dst[15:0] := Saturate16(rf[31:0])
+ // dst[31:16] := Saturate16(rf[63:32])
+ // ...
+ // dst[255:240] := Saturate16(rf[255:224])
+ __m256i lo = _mm256_castps_si256(extract256<0>(rf));
+ __m256i hi = _mm256_castps_si256(extract256<1>(rf));
+ __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0),
+ _mm256_extractf128_si256(lo, 1));
+ __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0),
+ _mm256_extractf128_si256(hi, 1));
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1);
+}
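+// Pack32To16 is used by the Packet16h/Packet16bf comparisons further below:
+// they compare in float (32-bit flags per lane) and then narrow the flags to
+// the 16-bit lane width of the half/bfloat16 packets, e.g. (sketch):
+//
+//   Packet16h eq = Pack32To16(pcmp_eq(half2float(a), half2float(b)));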
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+template<> EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) {
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGE_UQ);
+ return _mm512_castsi512_ps(
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) {
+ __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_EQ);
+ return _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu);
+}
+
+
+template <>
+EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) {
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ);
+ return _mm512_castsi512_pd(
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pcmp_le(const Packet8d& a, const Packet8d& b) {
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LE_OQ);
+ return _mm512_castsi512_pd(
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pcmp_lt(const Packet8d& a, const Packet8d& b) {
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ);
+ return _mm512_castsi512_pd(
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8d pcmp_lt_or_nan(const Packet8d& a, const Packet8d& b) {
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_NGE_UQ);
+ return _mm512_castsi512_pd(
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
+}
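+// A small compilable sketch of how these comparisons compose with pselect: a
+// branch-free "clamp from below" (hypothetical helper, not part of Eigen).
+EIGEN_STRONG_INLINE Packet16f pclamp_below_sketch(const Packet16f& x,
+                                                  const Packet16f& lo) {
+  // lanes where x < lo are all-ones in the mask and take 'lo'; others keep x
+  return pselect(pcmp_lt(x, lo), lo, x);
+}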
+
+template<> EIGEN_STRONG_INLINE Packet16f print<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_CUR_DIRECTION); }
+template<> EIGEN_STRONG_INLINE Packet8d print<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_CUR_DIRECTION); }
+
+template<> EIGEN_STRONG_INLINE Packet16f pceil<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_POS_INF); }
+template<> EIGEN_STRONG_INLINE Packet8d pceil<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_POS_INF); }
+
+template<> EIGEN_STRONG_INLINE Packet16f pfloor<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEG_INF); }
+template<> EIGEN_STRONG_INLINE Packet8d pfloor<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_NEG_INF); }
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane2_a, lane2_b), 2);
+template <>
+EIGEN_STRONG_INLINE Packet16i ptrue<Packet16i>(const Packet16i& /*a*/) {
+ return _mm512_set1_epi32(0xffffffffu);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f ptrue<Packet16f>(const Packet16f& a) {
+ return _mm512_castsi512_ps(ptrue<Packet16i>(_mm512_castps_si512(a)));
+}
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_and_ps(lane3_a, lane3_b), 3);
+template <>
+EIGEN_STRONG_INLINE Packet8d ptrue<Packet8d>(const Packet8d& a) {
+ return _mm512_castsi512_pd(ptrue<Packet16i>(_mm512_castpd_si512(a)));
+}
- return res;
+template <>
+EIGEN_STRONG_INLINE Packet16i pand<Packet16i>(const Packet16i& a,
+ const Packet16i& b) {
+ return _mm512_and_si512(a,b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
+ const Packet16f& b) {
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+ return _mm512_and_ps(a, b);
+#else
+ return _mm512_castsi512_ps(pand(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
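// Plain AVX-512F has no 512-bit float/double bitwise intrinsics (they were
// added with AVX512DQ), so the fallback above and the ones below cast to
// __m512i, use the always-available integer ops, and cast back; the casts
// are pure reinterpretations and emit no instructions.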
template <>
@@ -292,35 +575,21 @@ EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,
Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
-
- return res;
+ return _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
#endif
}
+
template <>
-EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+EIGEN_STRONG_INLINE Packet16i por<Packet16i>(const Packet16i& a, const Packet16i& b) {
+ return _mm512_or_si512(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_or_ps(a, b);
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane0_a, lane0_b), 0);
-
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane1_a, lane1_b), 1);
-
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane2_a, lane2_b), 2);
-
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_or_ps(lane3_a, lane3_b), 3);
-
- return res;
+ return _mm512_castsi512_ps(por(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
@@ -330,107 +599,80 @@ EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_or_pd(a, b);
#else
- Packet8d res = _mm512_undefined_pd();
- Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
- Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
- res = _mm512_insertf64x4(res, _mm256_or_pd(lane0_a, lane0_b), 0);
-
- Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
- Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_or_pd(lane1_a, lane1_b), 1);
-
- return res;
+ return _mm512_castsi512_pd(por(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+EIGEN_STRONG_INLINE Packet16i pxor<Packet16i>(const Packet16i& a, const Packet16i& b) {
+ return _mm512_xor_si512(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_xor_ps(a, b);
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane0_a, lane0_b), 0);
-
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane1_a, lane1_b), 1);
-
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane2_a, lane2_b), 2);
-
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_xor_ps(lane3_a, lane3_b), 3);
-
- return res;
+ return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
+
template <>
-EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a,
- const Packet8d& b) {
+EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a, const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
return _mm512_xor_pd(a, b);
#else
- Packet8d res = _mm512_undefined_pd();
- Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
- Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
- res = _mm512_insertf64x4(res, _mm256_xor_pd(lane0_a, lane0_b), 0);
-
- Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
- Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_xor_pd(lane1_a, lane1_b), 1);
-
- return res;
+ return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a,
- const Packet16f& b) {
+EIGEN_STRONG_INLINE Packet16i pandnot<Packet16i>(const Packet16i& a, const Packet16i& b) {
+ return _mm512_andnot_si512(b, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
- return _mm512_andnot_ps(a, b);
+ return _mm512_andnot_ps(b, a);
#else
- Packet16f res = _mm512_undefined_ps();
- Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
- Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane0_a, lane0_b), 0);
-
- Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
- Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane1_a, lane1_b), 1);
-
- Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
- Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane2_a, lane2_b), 2);
-
- Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
- Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
- res = _mm512_insertf32x4(res, _mm_andnot_ps(lane3_a, lane3_b), 3);
-
- return res;
+ return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a),_mm512_castps_si512(b)));
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,
- const Packet8d& b) {
+EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
- return _mm512_andnot_pd(a, b);
+ return _mm512_andnot_pd(b, a);
#else
- Packet8d res = _mm512_undefined_pd();
- Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
- Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
- res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane0_a, lane0_b), 0);
+ return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
+#endif
+}
- Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
- Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
- res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane1_a, lane1_b), 1);
+template<> EIGEN_STRONG_INLINE Packet16f pround<Packet16f>(const Packet16f& a)
+{
+ // Work-around for default std::round rounding mode.
+ const Packet16f mask = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x80000000u));
+ const Packet16f prev0dot5 = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
+ return _mm512_roundscale_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+}
+template<> EIGEN_STRONG_INLINE Packet8d pround<Packet8d>(const Packet8d& a)
+{
+ // Work-around for default std::round rounding mode.
+ const Packet8d mask = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
+ const Packet8d prev0dot5 = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
+ return _mm512_roundscale_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+}
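+// How the work-around rounds halfway cases: 0x3EFFFFFF / 0x3FDFFFFFFFFFFFFF
+// encode the largest float / double strictly below 0.5, and pand/por attach
+// the sign of 'a' to it. Adding copysign(0.5 - 1ulp, a) and then truncating
+// toward zero rounds halfway cases away from zero like std::round, whereas a
+// plain round-to-nearest would round them to even. For example, in float,
+// 2.5f + 0.49999997f rounds to exactly 3.0f, which truncates to 3.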
- return res;
-#endif
+template<int N> EIGEN_STRONG_INLINE Packet16i parithmetic_shift_right(Packet16i a) {
+ return _mm512_srai_epi32(a, N);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_right(Packet16i a) {
+ return _mm512_srli_epi32(a, N);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_left(Packet16i a) {
+ return _mm512_slli_epi32(a, N);
}
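// Sketch: these compile-time shifts make bit-field manipulation cheap; e.g.
// a hypothetical helper (not used by Eigen itself) extracting each lane's
// raw biased exponent field:
EIGEN_STRONG_INLINE Packet16i pbiased_exponent_sketch(const Packet16f& a) {
  Packet16i bits = _mm512_castps_si512(a);
  // the float exponent occupies bits 23..30; shift down and mask to 8 bits
  return pand(plogical_shift_right<23>(bits), pset1<Packet16i>(0xff));
}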
template <>
@@ -461,15 +703,26 @@ EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
reinterpret_cast<const __m512i*>(from));
}
+template <>
+EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from, uint16_t umask) {
+ __mmask16 mask = static_cast<__mmask16>(umask);
+ EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_ps(mask, from);
+}
+
// Loads 8 floats from memory and returns the packet
// {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
template <>
EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
- __m256i low_half = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
+ // an unaligned load is required here as there is no requirement
+ // on the alignment of input pointer 'from'
+ __m256i low_half = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
__m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));
__m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
return pairs;
}
+
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+// FIXME: this does not look optimal, better load a Packet4d and shuffle...
// Loads 4 doubles from memory and returns the packet
// {a0, a0, a1, a1, a2, a2, a3, a3}
template <>
@@ -481,26 +734,33 @@ EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3);
return x;
}
+#else
+template <>
+EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
+ __m512d x = _mm512_setzero_pd();
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<0, _mm_load_sd(from+0));
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<2, _mm_load_sd(from+1));
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<4, _mm_load_sd(from+2));
+ x = _mm512_mask_broadcastsd_pd(x, 0x3<<6, _mm_load_sd(from+3));
+ return x;
+}
+#endif
// Loads 4 floats from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
template <>
EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
- Packet16f tmp = _mm512_undefined_ps();
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from), 0);
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 1), 1);
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 2), 2);
- tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 3), 3);
- return tmp;
+ Packet16f tmp = _mm512_castps128_ps512(ploadu<Packet4f>(from));
+ const Packet16i scatter_mask = _mm512_set_epi32(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
+ return _mm512_permutexvar_ps(scatter_mask, tmp);
}
+
// Loads 2 doubles from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
- __m128d tmp0 = _mm_load_pd1(from);
- __m256d lane0 = _mm256_broadcastsd_pd(tmp0);
- __m128d tmp1 = _mm_load_pd1(from + 1);
- __m256d lane1 = _mm256_broadcastsd_pd(tmp1);
+ __m256d lane0 = _mm256_set1_pd(*from);
+ __m256d lane1 = _mm256_set1_pd(*(from+1));
__m512d tmp = _mm512_undefined_pd();
tmp = _mm512_insertf64x4(tmp, lane0, 0);
return _mm512_insertf64x4(tmp, lane1, 1);
@@ -533,11 +793,16 @@ EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
reinterpret_cast<__m512i*>(to), from);
}
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from, uint16_t umask) {
+ __mmask16 mask = static_cast<__mmask16>(umask);
+ EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_ps(to, mask, from);
+}
template <>
EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
Index stride) {
- Packet16i stride_vector = _mm512_set1_epi32(stride);
+ Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
Packet16i stride_multiplier =
_mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
@@ -547,7 +812,7 @@ EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
template <>
EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,
Index stride) {
- Packet8i stride_vector = _mm256_set1_epi32(stride);
+ Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
@@ -558,7 +823,7 @@ template <>
EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,
const Packet16f& from,
Index stride) {
- Packet16i stride_vector = _mm512_set1_epi32(stride);
+ Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
Packet16i stride_multiplier =
_mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
@@ -568,7 +833,7 @@ template <>
EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,
const Packet8d& from,
Index stride) {
- Packet8i stride_vector = _mm256_set1_epi32(stride);
+ Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
_mm512_i32scatter_pd(to, indices, from, 8);
@@ -590,9 +855,9 @@ EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
pstore(to, pa);
}
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
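+// SsePrefetchPtrType comes from the SSE PacketMath header: GCC declares
+// _mm_prefetch with 'const void*' while MSVC uses 'const char*', so the cast
+// type is a per-compiler typedef instead of a hard-coded (const char*).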
template <>
EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
@@ -620,13 +885,66 @@ template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
{
// _mm512_abs_ps intrinsic not found, so hack around it
- return (__m512)_mm512_and_si512((__m512i)a, _mm512_set1_epi32(0x7fffffff));
+ return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
}
template <>
EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
// _mm512_abs_pd intrinsic not found, so hack around it
- return (__m512d)_mm512_and_si512((__m512i)a,
- _mm512_set1_epi64(0x7fffffffffffffff));
+ return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
+ _mm512_set1_epi64(0x7fffffffffffffff)));
+}
+
+template<>
+EIGEN_STRONG_INLINE Packet16f pfrexp<Packet16f>(const Packet16f& a, Packet16f& exponent){
+ return pfrexp_generic(a, exponent);
+}
+
+// Extract exponent without existence of Packet8l.
+template<>
+EIGEN_STRONG_INLINE
+Packet8d pfrexp_generic_get_biased_exponent(const Packet8d& a) {
+ const Packet8d cst_exp_mask = pset1frombits<Packet8d>(static_cast<uint64_t>(0x7ff0000000000000ull));
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ return _mm512_cvtepi64_pd(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52));
+ #else
+ return _mm512_cvtepi32_pd(_mm512_cvtepi64_epi32(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52)));
+ #endif
+}
+
+template<>
+EIGEN_STRONG_INLINE Packet8d pfrexp<Packet8d>(const Packet8d& a, Packet8d& exponent) {
+ return pfrexp_generic(a, exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pldexp<Packet16f>(const Packet16f& a, const Packet16f& exponent) {
+ return pldexp_generic(a, exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8d pldexp<Packet8d>(const Packet8d& a, const Packet8d& exponent) {
+ // Clamp exponent to [-2099, 2099]
+ const Packet8d max_exponent = pset1<Packet8d>(2099.0);
+ const Packet8i e = _mm512_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
+
+ // Split 2^e into four factors and multiply.
+ const Packet8i bias = pset1<Packet8i>(1023);
+ Packet8i b = parithmetic_shift_right<2>(e); // floor(e/4)
+
+ // 2^b
+ const Packet8i permute_idx = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
+ Packet8i hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
+ Packet8i lo = _mm256_slli_epi64(hi, 52);
+ hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
+ Packet8d c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
+ Packet8d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
+
+ // 2^(e - 3b)
+ b = psub(psub(psub(e, b), b), b); // e - 3b
+ hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
+ lo = _mm256_slli_epi64(hi, 52);
+ hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
+ c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
+ out = pmul(out, c); // a * 2^e
+ return out;
}
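// Why the factor splitting above: e may reach +/-2099, far outside the
// +/-1023 exponent range of a single double. With b = floor(e/4), each of
// the four factors stays representable, e.g. e = 2099 gives b = 524, so
// out = a * 2^524 * 2^524 * 2^524 * 2^527.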
#ifdef EIGEN_VECTORIZE_AVX512DQ
@@ -646,205 +964,15 @@ EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
- OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTA, 0); \
- OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTB, 1);
+ OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1);
#else
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
+ OUTPUT = _mm512_undefined_ps(); \
OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
#endif
-template<> EIGEN_STRONG_INLINE Packet16f preduxp<Packet16f>(const Packet16f*
-vecs)
-{
- EIGEN_EXTRACT_8f_FROM_16f(vecs[0], vecs0);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[1], vecs1);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[2], vecs2);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[3], vecs3);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[4], vecs4);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[5], vecs5);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[6], vecs6);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[7], vecs7);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[8], vecs8);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[9], vecs9);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[10], vecs10);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[11], vecs11);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[12], vecs12);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[13], vecs13);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[14], vecs14);
- EIGEN_EXTRACT_8f_FROM_16f(vecs[15], vecs15);
-
- __m256 hsum1 = _mm256_hadd_ps(vecs0_0, vecs1_0);
- __m256 hsum2 = _mm256_hadd_ps(vecs2_0, vecs3_0);
- __m256 hsum3 = _mm256_hadd_ps(vecs4_0, vecs5_0);
- __m256 hsum4 = _mm256_hadd_ps(vecs6_0, vecs7_0);
-
- __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
- __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
- __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
- __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
-
- __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
- __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
- __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
- __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
-
- __m256 sum1 = _mm256_add_ps(perm1, hsum5);
- __m256 sum2 = _mm256_add_ps(perm2, hsum6);
- __m256 sum3 = _mm256_add_ps(perm3, hsum7);
- __m256 sum4 = _mm256_add_ps(perm4, hsum8);
-
- __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
- __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
-
- __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
-
- hsum1 = _mm256_hadd_ps(vecs0_1, vecs1_1);
- hsum2 = _mm256_hadd_ps(vecs2_1, vecs3_1);
- hsum3 = _mm256_hadd_ps(vecs4_1, vecs5_1);
- hsum4 = _mm256_hadd_ps(vecs6_1, vecs7_1);
-
- hsum5 = _mm256_hadd_ps(hsum1, hsum1);
- hsum6 = _mm256_hadd_ps(hsum2, hsum2);
- hsum7 = _mm256_hadd_ps(hsum3, hsum3);
- hsum8 = _mm256_hadd_ps(hsum4, hsum4);
-
- perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
- perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
- perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
- perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
-
- sum1 = _mm256_add_ps(perm1, hsum5);
- sum2 = _mm256_add_ps(perm2, hsum6);
- sum3 = _mm256_add_ps(perm3, hsum7);
- sum4 = _mm256_add_ps(perm4, hsum8);
-
- blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
- blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
-
- final = _mm256_add_ps(final, _mm256_blend_ps(blend1, blend2, 0xf0));
-
- hsum1 = _mm256_hadd_ps(vecs8_0, vecs9_0);
- hsum2 = _mm256_hadd_ps(vecs10_0, vecs11_0);
- hsum3 = _mm256_hadd_ps(vecs12_0, vecs13_0);
- hsum4 = _mm256_hadd_ps(vecs14_0, vecs15_0);
-
- hsum5 = _mm256_hadd_ps(hsum1, hsum1);
- hsum6 = _mm256_hadd_ps(hsum2, hsum2);
- hsum7 = _mm256_hadd_ps(hsum3, hsum3);
- hsum8 = _mm256_hadd_ps(hsum4, hsum4);
-
- perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
- perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
- perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
- perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
-
- sum1 = _mm256_add_ps(perm1, hsum5);
- sum2 = _mm256_add_ps(perm2, hsum6);
- sum3 = _mm256_add_ps(perm3, hsum7);
- sum4 = _mm256_add_ps(perm4, hsum8);
-
- blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
- blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
-
- __m256 final_1 = _mm256_blend_ps(blend1, blend2, 0xf0);
-
- hsum1 = _mm256_hadd_ps(vecs8_1, vecs9_1);
- hsum2 = _mm256_hadd_ps(vecs10_1, vecs11_1);
- hsum3 = _mm256_hadd_ps(vecs12_1, vecs13_1);
- hsum4 = _mm256_hadd_ps(vecs14_1, vecs15_1);
-
- hsum5 = _mm256_hadd_ps(hsum1, hsum1);
- hsum6 = _mm256_hadd_ps(hsum2, hsum2);
- hsum7 = _mm256_hadd_ps(hsum3, hsum3);
- hsum8 = _mm256_hadd_ps(hsum4, hsum4);
-
- perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
- perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
- perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
- perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
-
- sum1 = _mm256_add_ps(perm1, hsum5);
- sum2 = _mm256_add_ps(perm2, hsum6);
- sum3 = _mm256_add_ps(perm3, hsum7);
- sum4 = _mm256_add_ps(perm4, hsum8);
-
- blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
- blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
-
- final_1 = _mm256_add_ps(final_1, _mm256_blend_ps(blend1, blend2, 0xf0));
-
- __m512 final_output;
-
- EIGEN_INSERT_8f_INTO_16f(final_output, final, final_1);
- return final_output;
-}
-
-template<> EIGEN_STRONG_INLINE Packet8d preduxp<Packet8d>(const Packet8d* vecs)
-{
- Packet4d vecs0_0 = _mm512_extractf64x4_pd(vecs[0], 0);
- Packet4d vecs0_1 = _mm512_extractf64x4_pd(vecs[0], 1);
-
- Packet4d vecs1_0 = _mm512_extractf64x4_pd(vecs[1], 0);
- Packet4d vecs1_1 = _mm512_extractf64x4_pd(vecs[1], 1);
-
- Packet4d vecs2_0 = _mm512_extractf64x4_pd(vecs[2], 0);
- Packet4d vecs2_1 = _mm512_extractf64x4_pd(vecs[2], 1);
-
- Packet4d vecs3_0 = _mm512_extractf64x4_pd(vecs[3], 0);
- Packet4d vecs3_1 = _mm512_extractf64x4_pd(vecs[3], 1);
-
- Packet4d vecs4_0 = _mm512_extractf64x4_pd(vecs[4], 0);
- Packet4d vecs4_1 = _mm512_extractf64x4_pd(vecs[4], 1);
-
- Packet4d vecs5_0 = _mm512_extractf64x4_pd(vecs[5], 0);
- Packet4d vecs5_1 = _mm512_extractf64x4_pd(vecs[5], 1);
-
- Packet4d vecs6_0 = _mm512_extractf64x4_pd(vecs[6], 0);
- Packet4d vecs6_1 = _mm512_extractf64x4_pd(vecs[6], 1);
-
- Packet4d vecs7_0 = _mm512_extractf64x4_pd(vecs[7], 0);
- Packet4d vecs7_1 = _mm512_extractf64x4_pd(vecs[7], 1);
-
- Packet4d tmp0, tmp1;
-
- tmp0 = _mm256_hadd_pd(vecs0_0, vecs1_0);
- tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
-
- tmp1 = _mm256_hadd_pd(vecs2_0, vecs3_0);
- tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
-
- __m256d final_0 = _mm256_blend_pd(tmp0, tmp1, 0xC);
-
- tmp0 = _mm256_hadd_pd(vecs0_1, vecs1_1);
- tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
-
- tmp1 = _mm256_hadd_pd(vecs2_1, vecs3_1);
- tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
-
- final_0 = _mm256_add_pd(final_0, _mm256_blend_pd(tmp0, tmp1, 0xC));
-
- tmp0 = _mm256_hadd_pd(vecs4_0, vecs5_0);
- tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
-
- tmp1 = _mm256_hadd_pd(vecs6_0, vecs7_0);
- tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
-
- __m256d final_1 = _mm256_blend_pd(tmp0, tmp1, 0xC);
-
- tmp0 = _mm256_hadd_pd(vecs4_1, vecs5_1);
- tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
-
- tmp1 = _mm256_hadd_pd(vecs6_1, vecs7_1);
- tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
-
- final_1 = _mm256_add_pd(final_1, _mm256_blend_pd(tmp0, tmp1, 0xC));
-
- __m512d final_output = _mm512_insertf64x4(final_output, final_0, 0);
-
- return _mm512_insertf64x4(final_output, final_1, 1);
-}
template <>
EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
@@ -874,7 +1002,7 @@ EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
}
template <>
-EIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {
+EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
__m256 lane0 = _mm512_extractf32x8_ps(a, 0);
__m256 lane1 = _mm512_extractf32x8_ps(a, 1);
@@ -890,11 +1018,10 @@ EIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {
#endif
}
template <>
-EIGEN_STRONG_INLINE Packet4d predux_downto4<Packet8d>(const Packet8d& a) {
+EIGEN_STRONG_INLINE Packet4d predux_half_dowto4<Packet8d>(const Packet8d& a) {
__m256d lane0 = _mm512_extractf64x4_pd(a, 0);
__m256d lane1 = _mm512_extractf64x4_pd(a, 1);
- __m256d res = _mm256_add_pd(lane0, lane1);
- return res;
+ return _mm256_add_pd(lane0, lane1);
}
template <>
@@ -965,52 +1092,13 @@ EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
}
-template <int Offset>
-struct palign_impl<Offset, Packet16f> {
- static EIGEN_STRONG_INLINE void run(Packet16f& first,
- const Packet16f& second) {
- if (Offset != 0) {
- __m512i first_idx = _mm512_set_epi32(
- Offset + 15, Offset + 14, Offset + 13, Offset + 12, Offset + 11,
- Offset + 10, Offset + 9, Offset + 8, Offset + 7, Offset + 6,
- Offset + 5, Offset + 4, Offset + 3, Offset + 2, Offset + 1, Offset);
-
- __m512i second_idx =
- _mm512_set_epi32(Offset - 1, Offset - 2, Offset - 3, Offset - 4,
- Offset - 5, Offset - 6, Offset - 7, Offset - 8,
- Offset - 9, Offset - 10, Offset - 11, Offset - 12,
- Offset - 13, Offset - 14, Offset - 15, Offset - 16);
-
- unsigned short mask = 0xFFFF;
- mask <<= (16 - Offset);
-
- first = _mm512_permutexvar_ps(first_idx, first);
- Packet16f tmp = _mm512_permutexvar_ps(second_idx, second);
- first = _mm512_mask_blend_ps(mask, first, tmp);
- }
- }
-};
-template <int Offset>
-struct palign_impl<Offset, Packet8d> {
- static EIGEN_STRONG_INLINE void run(Packet8d& first, const Packet8d& second) {
- if (Offset != 0) {
- __m512i first_idx = _mm512_set_epi32(
- 0, Offset + 7, 0, Offset + 6, 0, Offset + 5, 0, Offset + 4, 0,
- Offset + 3, 0, Offset + 2, 0, Offset + 1, 0, Offset);
-
- __m512i second_idx = _mm512_set_epi32(
- 0, Offset - 1, 0, Offset - 2, 0, Offset - 3, 0, Offset - 4, 0,
- Offset - 5, 0, Offset - 6, 0, Offset - 7, 0, Offset - 8);
-
- unsigned char mask = 0xFF;
- mask <<= (8 - Offset);
-
- first = _mm512_permutexvar_pd(first_idx, first);
- Packet8d tmp = _mm512_permutexvar_pd(second_idx, second);
- first = _mm512_mask_blend_pd(mask, first, tmp);
- }
- }
-};
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet16f& x)
+{
+ Packet16i xi = _mm512_castps_si512(x);
+ __mmask16 tmp = _mm512_test_epi32_mask(xi,xi);
+ return !_mm512_kortestz(tmp,tmp);
+}
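+// _mm512_test_epi32_mask sets a mask bit for each lane whose bits are not
+// all zero, and _mm512_kortestz returns 1 iff the OR of its two mask
+// operands is zero, so the negation answers "is any lane non-zero?" without
+// going through memory.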
+
#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
@@ -1272,11 +1360,940 @@ EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
return Packet16f();
}
template <>
-EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& /*ifPacket*/,
- const Packet8d& /*thenPacket*/,
- const Packet8d& /*elsePacket*/) {
- assert(false && "To be implemented");
- return Packet8d();
+EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket,
+ const Packet8d& thenPacket,
+ const Packet8d& elsePacket) {
+ __mmask8 m = (ifPacket.select[0] )
+ | (ifPacket.select[1]<<1)
+ | (ifPacket.select[2]<<2)
+ | (ifPacket.select[3]<<3)
+ | (ifPacket.select[4]<<4)
+ | (ifPacket.select[5]<<5)
+ | (ifPacket.select[6]<<6)
+ | (ifPacket.select[7]<<7);
+ return _mm512_mask_blend_pd(m, elsePacket, thenPacket);
+}
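+// Usage sketch (hypothetical values): pick lanes 0 and 2 from 'thenPacket':
+//
+//   Selector<8> sel = {{true, false, true, false, false, false, false, false}};
+//   Packet8d r = pblend(sel, thenPacket, elsePacket);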
+
+// Packet math for Eigen::half
+template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
+ return _mm256_set1_epi16(from.x);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
+ return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from, 0)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
+ return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
+ // (void*) -> workaround clang warning:
+ // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
+ _mm256_store_si256((__m256i*)(void*)to, from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
+ // (void*) -> workaround clang warning:
+ // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
+ _mm256_storeu_si256((__m256i*)(void*)to, from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h
+ploaddup<Packet16h>(const Eigen::half* from) {
+ unsigned short a = from[0].x;
+ unsigned short b = from[1].x;
+ unsigned short c = from[2].x;
+ unsigned short d = from[3].x;
+ unsigned short e = from[4].x;
+ unsigned short f = from[5].x;
+ unsigned short g = from[6].x;
+ unsigned short h = from[7].x;
+ return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h
+ploadquad(const Eigen::half* from) {
+ unsigned short a = from[0].x;
+ unsigned short b = from[1].x;
+ unsigned short c = from[2].x;
+ unsigned short d = from[3].x;
+ return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
+}
+
+EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
+#ifdef EIGEN_HAS_FP16_C
+ return _mm512_cvtph_ps(a);
+#else
+ EIGEN_ALIGN64 half aux[16];
+ pstore(aux, a);
+ float f0(aux[0]);
+ float f1(aux[1]);
+ float f2(aux[2]);
+ float f3(aux[3]);
+ float f4(aux[4]);
+ float f5(aux[5]);
+ float f6(aux[6]);
+ float f7(aux[7]);
+ float f8(aux[8]);
+ float f9(aux[9]);
+ float fa(aux[10]);
+ float fb(aux[11]);
+ float fc(aux[12]);
+ float fd(aux[13]);
+ float fe(aux[14]);
+ float ff(aux[15]);
+
+ return _mm512_set_ps(
+ ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);
+#endif
+}
+
+EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
+#ifdef EIGEN_HAS_FP16_C
+ return _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
+#else
+ EIGEN_ALIGN64 float aux[16];
+ pstore(aux, a);
+ half h0(aux[0]);
+ half h1(aux[1]);
+ half h2(aux[2]);
+ half h3(aux[3]);
+ half h4(aux[4]);
+ half h5(aux[5]);
+ half h6(aux[6]);
+ half h7(aux[7]);
+ half h8(aux[8]);
+ half h9(aux[9]);
+ half ha(aux[10]);
+ half hb(aux[11]);
+ half hc(aux[12]);
+ half hd(aux[13]);
+ half he(aux[14]);
+ half hf(aux[15]);
+
+ return _mm256_set_epi16(
+ hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,
+ h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
+ return ptrue(Packet8i(a));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16h pabs(const Packet16h& a) {
+ const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm256_andnot_si256(sign_mask, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16h pmin<Packet16h>(const Packet16h& a,
+ const Packet16h& b) {
+ return float2half(pmin<Packet16f>(half2float(a), half2float(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16h pmax<Packet16h>(const Packet16h& a,
+ const Packet16h& b) {
+ return float2half(pmax<Packet16f>(half2float(a), half2float(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16h plset<Packet16h>(const half& a) {
+ return float2half(plset<Packet16f>(static_cast<float>(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a,const Packet16h& b) {
+ // in some cases Packet8i is a wrapper around __m256i, so we need to
+ // cast to Packet8i to call the correct overload.
+ return por(Packet8i(a),Packet8i(b));
+}
+template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a,const Packet16h& b) {
+ return pxor(Packet8i(a),Packet8i(b));
+}
+template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a,const Packet16h& b) {
+ return pand(Packet8i(a),Packet8i(b));
+}
+template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a,const Packet16h& b) {
+ return pandnot(Packet8i(a),Packet8i(b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) {
+ return _mm256_blendv_epi8(b, a, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pround<Packet16h>(const Packet16h& a) {
+ return float2half(pround<Packet16f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h print<Packet16h>(const Packet16h& a) {
+ return float2half(print<Packet16f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pceil<Packet16h>(const Packet16h& a) {
+ return float2half(pceil<Packet16f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pfloor<Packet16h>(const Packet16h& a) {
+ return float2half(pfloor<Packet16f>(half2float(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a,const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ return Pack32To16(pcmp_eq(af, bf));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pcmp_le(const Packet16h& a,const Packet16h& b) {
+ return Pack32To16(pcmp_le(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt(const Packet16h& a,const Packet16h& b) {
+ return Pack32To16(pcmp_lt(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt_or_nan(const Packet16h& a,const Packet16h& b) {
+ return Pack32To16(pcmp_lt_or_nan(half2float(a), half2float(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pconj(const Packet16h& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
+ Packet16h sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
+ return _mm256_xor_si256(a, sign_mask);
+}
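+// Negating a half only needs its sign bit (bit 15) flipped, so pnegate can
+// stay in the integer domain and skip the float round-trip used by the
+// arithmetic operators below.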
+
+template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = padd(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = psub(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = pmul(af, bf);
+ return float2half(rf);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
+ Packet16f af = half2float(a);
+ Packet16f bf = half2float(b);
+ Packet16f rf = pdiv(af, bf);
+ return float2half(rf);
+}
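+// padd/psub/pmul/pdiv on halves all follow the same pattern: widen to
+// Packet16f, operate in float, narrow back. Every half converts to float
+// exactly, and float carries enough precision that the final float2half
+// rounding matches what a correctly rounded native half operation would
+// produce for these four operators.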
+
+template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
+ Packet16f from_float = half2float(from);
+ return half(predux(from_float));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8h predux_half_dowto4<Packet16h>(const Packet16h& a) {
+ Packet8h lane0 = _mm256_extractf128_si256(a, 0);
+ Packet8h lane1 = _mm256_extractf128_si256(a, 1);
+ return padd<Packet8h>(lane0, lane1);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet16h>(const Packet16h& a) {
+ Packet16f af = half2float(a);
+ float reduced = predux_max<Packet16f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet16h>(const Packet16h& a) {
+ Packet16f af = half2float(a);
+ float reduced = predux_min<Packet16f>(af);
+ return Eigen::half(reduced);
+}
+
+template<> EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
+ Packet16f from_float = half2float(from);
+ return half(predux_mul(from_float));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a)
+{
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+ return _mm256_insertf128_si256(
+ _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a,1),m)),
+ _mm_shuffle_epi8(_mm256_extractf128_si256(a,0),m), 1);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
+{
+ return _mm256_set_epi16(
+ from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
+ from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
+ from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
+ from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
+}
+
+template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)
+{
+ EIGEN_ALIGN64 half aux[16];
+ pstore(aux, from);
+ to[stride*0] = aux[0];
+ to[stride*1] = aux[1];
+ to[stride*2] = aux[2];
+ to[stride*3] = aux[3];
+ to[stride*4] = aux[4];
+ to[stride*5] = aux[5];
+ to[stride*6] = aux[6];
+ to[stride*7] = aux[7];
+ to[stride*8] = aux[8];
+ to[stride*9] = aux[9];
+ to[stride*10] = aux[10];
+ to[stride*11] = aux[11];
+ to[stride*12] = aux[12];
+ to[stride*13] = aux[13];
+ to[stride*14] = aux[14];
+ to[stride*15] = aux[15];
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet16h,16>& kernel) {
+ __m256i a = kernel.packet[0];
+ __m256i b = kernel.packet[1];
+ __m256i c = kernel.packet[2];
+ __m256i d = kernel.packet[3];
+ __m256i e = kernel.packet[4];
+ __m256i f = kernel.packet[5];
+ __m256i g = kernel.packet[6];
+ __m256i h = kernel.packet[7];
+ __m256i i = kernel.packet[8];
+ __m256i j = kernel.packet[9];
+ __m256i k = kernel.packet[10];
+ __m256i l = kernel.packet[11];
+ __m256i m = kernel.packet[12];
+ __m256i n = kernel.packet[13];
+ __m256i o = kernel.packet[14];
+ __m256i p = kernel.packet[15];
+
+ __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
+ __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
+ __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
+ __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
+ __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
+ __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
+ __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
+ __m256i op_07 = _mm256_unpacklo_epi16(o, p);
+
+ __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
+ __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
+ __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
+ __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
+ __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
+ __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
+ __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
+ __m256i op_8f = _mm256_unpackhi_epi16(o, p);
+
+ __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
+ __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
+ __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
+ __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
+ __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
+ __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
+ __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
+ __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
+
+ __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
+ __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
+ __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
+ __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
+ __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
+ __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
+ __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
+ __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
+
+ __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
+ __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
+ __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
+ __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
+ __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
+ __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
+ __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
+ __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
+ __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
+ __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
+ __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
+ __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
+ __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
+ __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
+ __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
+ __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
+
+ // NOTE: no unpacklo/hi instr in this case, so using permute instr.
+ __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
+ __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
+ __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
+ __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
+ __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
+ __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
+ __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
+ __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
+ __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
+ __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
+ __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
+ __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
+ __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
+ __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
+ __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
+ __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
+
+ kernel.packet[0] = a_p_0;
+ kernel.packet[1] = a_p_1;
+ kernel.packet[2] = a_p_2;
+ kernel.packet[3] = a_p_3;
+ kernel.packet[4] = a_p_4;
+ kernel.packet[5] = a_p_5;
+ kernel.packet[6] = a_p_6;
+ kernel.packet[7] = a_p_7;
+ kernel.packet[8] = a_p_8;
+ kernel.packet[9] = a_p_9;
+ kernel.packet[10] = a_p_a;
+ kernel.packet[11] = a_p_b;
+ kernel.packet[12] = a_p_c;
+ kernel.packet[13] = a_p_d;
+ kernel.packet[14] = a_p_e;
+ kernel.packet[15] = a_p_f;
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet16h,8>& kernel) {
+ EIGEN_ALIGN64 half in[8][16];
+ pstore<half>(in[0], kernel.packet[0]);
+ pstore<half>(in[1], kernel.packet[1]);
+ pstore<half>(in[2], kernel.packet[2]);
+ pstore<half>(in[3], kernel.packet[3]);
+ pstore<half>(in[4], kernel.packet[4]);
+ pstore<half>(in[5], kernel.packet[5]);
+ pstore<half>(in[6], kernel.packet[6]);
+ pstore<half>(in[7], kernel.packet[7]);
+
+ EIGEN_ALIGN64 half out[8][16];
+
+ for (int i = 0; i < 8; ++i) {
+ for (int j = 0; j < 8; ++j) {
+ out[i][j] = in[j][2*i];
+ }
+ for (int j = 0; j < 8; ++j) {
+ out[i][j+8] = in[j][2*i+1];
+ }
+ }
+
+ kernel.packet[0] = pload<Packet16h>(out[0]);
+ kernel.packet[1] = pload<Packet16h>(out[1]);
+ kernel.packet[2] = pload<Packet16h>(out[2]);
+ kernel.packet[3] = pload<Packet16h>(out[3]);
+ kernel.packet[4] = pload<Packet16h>(out[4]);
+ kernel.packet[5] = pload<Packet16h>(out[5]);
+ kernel.packet[6] = pload<Packet16h>(out[6]);
+ kernel.packet[7] = pload<Packet16h>(out[7]);
+}
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet16h,4>& kernel) {
+ EIGEN_ALIGN64 half in[4][16];
+ pstore<half>(in[0], kernel.packet[0]);
+ pstore<half>(in[1], kernel.packet[1]);
+ pstore<half>(in[2], kernel.packet[2]);
+ pstore<half>(in[3], kernel.packet[3]);
+
+ EIGEN_ALIGN64 half out[4][16];
+
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ out[i][j] = in[j][4*i];
+ }
+ for (int j = 0; j < 4; ++j) {
+ out[i][j+4] = in[j][4*i+1];
+ }
+ for (int j = 0; j < 4; ++j) {
+ out[i][j+8] = in[j][4*i+2];
+ }
+ for (int j = 0; j < 4; ++j) {
+ out[i][j+12] = in[j][4*i+3];
+ }
+ }
+
+ kernel.packet[0] = pload<Packet16h>(out[0]);
+ kernel.packet[1] = pload<Packet16h>(out[1]);
+ kernel.packet[2] = pload<Packet16h>(out[2]);
+ kernel.packet[3] = pload<Packet16h>(out[3]);
+}
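+// Note on the two overloads above: for PacketBlock<Packet16h,N> with N < 16
+// the transpose is partial. Output packet i gathers 16/N consecutive columns
+// of the Nx16 block: for N == 8 it holds columns 2i and 2i+1 (each read
+// across the 8 rows), and for N == 4 it holds columns 4i..4i+3.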
+
+template <> struct is_arithmetic<Packet16bf> { enum { value = true }; };
+
+template <>
+struct packet_traits<bfloat16> : default_packet_traits {
+ typedef Packet16bf type;
+ typedef Packet8bf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 16,
+ HasHalfPacket = 1,
+ HasBlend = 0,
+ HasInsert = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+#if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
+#ifdef EIGEN_VECTORIZE_AVX512DQ
+ HasLog = 1, // Currently fails the accuracy test.
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
+#endif
+ HasExp = 1,
+ HasSqrt = EIGEN_FAST_MATH,
+ HasRsqrt = EIGEN_FAST_MATH,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+#endif
+ HasCmp = 1,
+ HasDiv = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet16bf>
+{
+ typedef bfloat16 type;
+ enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
+ typedef Packet8bf half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pset1<Packet16bf>(const bfloat16& from) {
+ return _mm256_set1_epi16(from.value);
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 pfirst<Packet16bf>(const Packet16bf& from) {
+ bfloat16 t;
+ t.value = static_cast<unsigned short>(_mm256_extract_epi16(from, 0));
+ return t;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pload<Packet16bf>(const bfloat16* from) {
+ return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf ploadu<Packet16bf>(const bfloat16* from) {
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to,
+ const Packet16bf& from) {
+ _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to,
+ const Packet16bf& from) {
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16bf
+ploaddup<Packet16bf>(const bfloat16* from) {
+ unsigned short a = from[0].value;
+ unsigned short b = from[1].value;
+ unsigned short c = from[2].value;
+ unsigned short d = from[3].value;
+ unsigned short e = from[4].value;
+ unsigned short f = from[5].value;
+ unsigned short g = from[6].value;
+ unsigned short h = from[7].value;
+ return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16bf
+ploadquad(const bfloat16* from) {
+ unsigned short a = from[0].value;
+ unsigned short b = from[1].value;
+ unsigned short c = from[2].value;
+ unsigned short d = from[3].value;
+ return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
+}
+
+EIGEN_STRONG_INLINE Packet16f Bf16ToF32(const Packet16bf& a) {
+ return _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
+}
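+// In scalar terms the widening above is just a shift: a bfloat16 holds the
+// upper 16 bits of an IEEE-754 float, so
+//   float_bits = uint32_t(bf16_bits) << 16;
+// recovers the exact float value (this direction never rounds).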
+
+// Convert float to bfloat16 using round-to-nearest-even (denormal-safe); NaN inputs map to a quiet NaN.
+EIGEN_STRONG_INLINE Packet16bf F32ToBf16(const Packet16f& a) {
+ Packet16bf r;
+
+#if defined(EIGEN_VECTORIZE_AVX512BF16) && EIGEN_GNUC_AT_LEAST(10, 1)
+ // GCC 10.1 and newer support avx512bf16 and a C-style explicit cast
+ // (C++ static_cast is not supported yet), so do the conversion via the
+ // intrinsic and the register path for performance.
+ r = (__m256i)(_mm512_cvtneps_pbh(a));
+
+#else
+ __m512i t;
+ __m512i input = _mm512_castps_si512(a);
+ __m512i nan = _mm512_set1_epi32(0x7fc0);
+
+ // uint32_t lsb = (input >> 16) & 1;
+ t = _mm512_and_si512(_mm512_srli_epi32(input, 16), _mm512_set1_epi32(1));
+ // uint32_t rounding_bias = 0x7fff + lsb;
+ t = _mm512_add_epi32(t, _mm512_set1_epi32(0x7fff));
+ // input += rounding_bias;
+ t = _mm512_add_epi32(t, input);
+ // input = input >> 16;
+ t = _mm512_srli_epi32(t, 16);
+
+ // Check NaN before converting back to bf16
+ __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
+
+ t = _mm512_mask_blend_epi32(mask, nan, t);
+ // output.value = static_cast<uint16_t>(input);
+ r = _mm512_cvtepi32_epi16(t);
+#endif // EIGEN_VECTORIZE_AVX512BF16
+
+ return r;
+}
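+// Worked example of the bias trick above: 1.0f is 0x3f800000, lsb = 0, so
+// input + 0x7fff = 0x3f807fff and shifting right by 16 gives 0x3f80 (1.0 in
+// bfloat16). The tie 0x3f808000 also maps to 0x3f80, while 0x3f818000 has
+// lsb = 1, so 0x3f818000 + 0x8000 = 0x3f820000 maps to 0x3f82: ties round
+// to the even representation.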
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf ptrue(const Packet16bf& a) {
+ return ptrue<Packet8i>(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf por(const Packet16bf& a, const Packet16bf& b) {
+ return por<Packet8i>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pxor(const Packet16bf& a, const Packet16bf& b) {
+ return pxor<Packet8i>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pand(const Packet16bf& a, const Packet16bf& b) {
+ return pand<Packet8i>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pandnot(const Packet16bf& a,
+ const Packet16bf& b) {
+ return pandnot<Packet8i>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pselect(const Packet16bf& mask,
+ const Packet16bf& a,
+ const Packet16bf& b) {
+ // The mask is expected to be all-zeros or all-ones per 16-bit element, so
+ // the byte-wise blend (keyed on each byte's high bit) selects whole
+ // elements; it is used here for performance.
+ return _mm256_blendv_epi8(b, a, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16bf pround<Packet16bf>(const Packet16bf& a)
+{
+ return F32ToBf16(pround<Packet16f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16bf print<Packet16bf>(const Packet16bf& a) {
+ return F32ToBf16(print<Packet16f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16bf pceil<Packet16bf>(const Packet16bf& a) {
+ return F32ToBf16(pceil<Packet16f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16bf pfloor<Packet16bf>(const Packet16bf& a) {
+ return F32ToBf16(pfloor<Packet16f>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pcmp_eq(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pcmp_le(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pcmp_lt(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pcmp_lt_or_nan(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pnegate(const Packet16bf& a) {
+ Packet16bf sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
+ return _mm256_xor_si256(a, sign_mask);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pconj(const Packet16bf& a) {
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pabs(const Packet16bf& a) {
+ const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm256_andnot_si256(sign_mask, a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf padd<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(padd<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf psub<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(psub<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pmul<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pmul<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pdiv<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pdiv<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pmin<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pmin<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pmax<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pmax<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf plset<Packet16bf>(const bfloat16& a) {
+ return F32ToBf16(plset<Packet16f>(static_cast<float>(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4<Packet16bf>(const Packet16bf& a) {
+ Packet8bf lane0 = _mm256_extractf128_si256(a, 0);
+ Packet8bf lane1 = _mm256_extractf128_si256(a, 1);
+ return padd<Packet8bf>(lane0, lane1);
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux<Packet16bf>(const Packet16bf& p) {
+ return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(p)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet16bf>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min<Packet16bf>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max<Packet16bf>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_max<Packet16f>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) {
+ __m256i m = _mm256_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1,
+ 14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+
+ Packet16bf res;
+ // _mm256_shuffle_epi8 only shuffles within each 128-bit lane, so swap the
+ // hi and lo lanes first, then reverse the 16-bit elements inside each lane.
+ res = _mm256_permute2x128_si256(a, a, 1);
+ return _mm256_shuffle_epi8(res, m);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet16bf pgather<bfloat16, Packet16bf>(const bfloat16* from,
+ Index stride) {
+ return _mm256_set_epi16(
+ from[15*stride].value, from[14*stride].value, from[13*stride].value, from[12*stride].value,
+ from[11*stride].value, from[10*stride].value, from[9*stride].value, from[8*stride].value,
+ from[7*stride].value, from[6*stride].value, from[5*stride].value, from[4*stride].value,
+ from[3*stride].value, from[2*stride].value, from[1*stride].value, from[0*stride].value);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet16bf>(bfloat16* to,
+ const Packet16bf& from,
+ Index stride) {
+ EIGEN_ALIGN64 bfloat16 aux[16];
+ pstore(aux, from);
+ to[stride*0] = aux[0];
+ to[stride*1] = aux[1];
+ to[stride*2] = aux[2];
+ to[stride*3] = aux[3];
+ to[stride*4] = aux[4];
+ to[stride*5] = aux[5];
+ to[stride*6] = aux[6];
+ to[stride*7] = aux[7];
+ to[stride*8] = aux[8];
+ to[stride*9] = aux[9];
+ to[stride*10] = aux[10];
+ to[stride*11] = aux[11];
+ to[stride*12] = aux[12];
+ to[stride*13] = aux[13];
+ to[stride*14] = aux[14];
+ to[stride*15] = aux[15];
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,16>& kernel) {
+ __m256i a = kernel.packet[0];
+ __m256i b = kernel.packet[1];
+ __m256i c = kernel.packet[2];
+ __m256i d = kernel.packet[3];
+ __m256i e = kernel.packet[4];
+ __m256i f = kernel.packet[5];
+ __m256i g = kernel.packet[6];
+ __m256i h = kernel.packet[7];
+ __m256i i = kernel.packet[8];
+ __m256i j = kernel.packet[9];
+ __m256i k = kernel.packet[10];
+ __m256i l = kernel.packet[11];
+ __m256i m = kernel.packet[12];
+ __m256i n = kernel.packet[13];
+ __m256i o = kernel.packet[14];
+ __m256i p = kernel.packet[15];
+
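+ // The 16x16 transpose below is a four-stage butterfly: interleave rows at
+ // 16-bit, then 32-bit, then 64-bit granularity, and finally recombine the
+ // 128-bit halves with cross-lane permutes.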
+ __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
+ __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
+ __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
+ __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
+ __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
+ __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
+ __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
+ __m256i op_07 = _mm256_unpacklo_epi16(o, p);
+
+ __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
+ __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
+ __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
+ __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
+ __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
+ __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
+ __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
+ __m256i op_8f = _mm256_unpackhi_epi16(o, p);
+
+ __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
+ __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
+ __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
+ __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
+ __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
+ __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
+ __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
+ __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
+
+ __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
+ __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
+ __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
+ __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
+ __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
+ __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
+ __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
+ __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
+
+ __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
+ __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
+ __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
+ __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
+ __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
+ __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
+ __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
+ __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
+ __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
+ __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
+ __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
+ __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
+ __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
+ __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
+ __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
+ __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
+
+ // NOTE: there is no unpack instruction at 128-bit granularity, so the final stage uses permutes instead.
+ kernel.packet[0] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
+ kernel.packet[1] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
+ kernel.packet[2] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
+ kernel.packet[3] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
+ kernel.packet[4] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
+ kernel.packet[5] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
+ kernel.packet[6] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
+ kernel.packet[7] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
+ kernel.packet[8] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
+ kernel.packet[9] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
+ kernel.packet[10] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
+ kernel.packet[11] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
+ kernel.packet[12] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
+ kernel.packet[13] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
+ kernel.packet[14] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
+ kernel.packet[15] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,4>& kernel) {
+ __m256i a = kernel.packet[0];
+ __m256i b = kernel.packet[1];
+ __m256i c = kernel.packet[2];
+ __m256i d = kernel.packet[3];
+
+ __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
+ __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
+ __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
+ __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
+
+ __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
+ __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
+ __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
+ __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
+
+ // NOTE: there is no unpack instruction at 128-bit granularity, so the final stage uses permutes instead.
+ kernel.packet[0] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x20);
+ kernel.packet[1] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x20);
+ kernel.packet[2] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x31);
+ kernel.packet[3] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x31);
}
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/TypeCasting.h
new file mode 100644
index 000000000..330412729
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AVX512/TypeCasting.h
@@ -0,0 +1,89 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2019 Rasmus Munk Larsen <rmlarsen@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_AVX512_H
+#define EIGEN_TYPE_CASTING_AVX512_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<> EIGEN_STRONG_INLINE Packet16i pcast<Packet16f, Packet16i>(const Packet16f& a) {
+ return _mm512_cvttps_epi32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16i, Packet16f>(const Packet16i& a) {
+ return _mm512_cvtepi32_ps(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16i preinterpret<Packet16i, Packet16f>(const Packet16f& a) {
+ return _mm512_castps_si512(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16f preinterpret<Packet16f, Packet16i>(const Packet16i& a) {
+ return _mm512_castsi512_ps(a);
+}
+
+template <>
+struct type_casting_traits<half, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
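+// SrcCoeffRatio/TgtCoeffRatio state how many source packets one vectorized
+// cast consumes and how many target packets it produces; both ratios are 1
+// here because Packet16h and Packet16f carry the same 16 coefficients.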
+
+template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {
+ return half2float(a);
+}
+
+template <>
+struct type_casting_traits<float, half> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {
+ return float2half(a);
+}
+
+template <>
+struct type_casting_traits<bfloat16, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16bf, Packet16f>(const Packet16bf& a) {
+ return Bf16ToF32(a);
+}
+
+template <>
+struct type_casting_traits<float, bfloat16> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet16bf pcast<Packet16f, Packet16bf>(const Packet16f& a) {
+ return F32ToBf16(a);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_AVX512_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/Complex.h
index 3e665730c..b3932998c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/Complex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -29,8 +29,54 @@ static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (P
//---------- float ----------
struct Packet2cf
{
- EIGEN_STRONG_INLINE explicit Packet2cf() : v(p4f_ZERO) {}
+ EIGEN_STRONG_INLINE explicit Packet2cf() {}
EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b)
+ {
+ Packet4f v1, v2;
+
+ // Permute and multiply the real parts of a and b
+ v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
+ // Get the imaginary parts of a
+ v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
+ // multiply a_re * b
+ v1 = vec_madd(v1, b.v, p4f_ZERO);
+ // multiply a_im * b and get the conjugate result
+ v2 = vec_madd(v2, b.v, p4f_ZERO);
+ v2 = reinterpret_cast<Packet4f>(pxor(v2, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR)));
+ // permute back to a proper order
+ v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);
+
+ return Packet2cf(padd<Packet4f>(v1, v2));
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) {
+ v = pmul(Packet2cf(*this), b).v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const {
+ return Packet2cf(*this) *= b;
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const {
+ return Packet2cf(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const {
+ return Packet2cf(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(void) const {
+ return Packet2cf(-v);
+ }
+
Packet4f v;
};
@@ -38,6 +84,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
{
typedef Packet2cf type;
typedef Packet2cf half;
+ typedef Packet4f as_real;
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
@@ -60,7 +107,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2cf half; typedef Packet4f as_real; };
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
@@ -80,16 +127,35 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { pstore((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { pstoreu((float*)to, from.v); }
+EIGEN_STRONG_INLINE Packet2cf pload2(const std::complex<float>& from0, const std::complex<float>& from1)
+{
+ Packet4f res0, res1;
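+  // Each std::complex<float> is 64 bits wide: lxsdx loads one into the first
+  // doubleword of a vector register and xxpermdi concatenates the two
+  // doublewords into [from0, from1] (operand order swapped on little-endian).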
+#ifdef __VSX__
+ __asm__ ("lxsdx %x0,%y1" : "=wa" (res0) : "Z" (from0));
+ __asm__ ("lxsdx %x0,%y1" : "=wa" (res1) : "Z" (from1));
+#ifdef _BIG_ENDIAN
+ __asm__ ("xxpermdi %x0, %x1, %x2, 0" : "=wa" (res0) : "wa" (res0), "wa" (res1));
+#else
+ __asm__ ("xxpermdi %x0, %x2, %x1, 0" : "=wa" (res0) : "wa" (res0), "wa" (res1));
+#endif
+#else
+ *reinterpret_cast<std::complex<float> *>(&res0) = from0;
+ *reinterpret_cast<std::complex<float> *>(&res1) = from1;
+ res0 = vec_perm(res0, res1, p16uc_TRANSPOSE64_HI);
+#endif
+ return Packet2cf(res0);
+}
+
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
{
- std::complex<float> EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 std::complex<float> af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet2cf>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
{
- std::complex<float> EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 std::complex<float> af[2];
pstore<std::complex<float> >((std::complex<float> *) af, from);
to[0*stride] = af[0];
to[1*stride] = af[1];
@@ -100,25 +166,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, con
template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR))); }
-template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
- Packet4f v1, v2;
-
- // Permute and multiply the real parts of a and b
- v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
- // Get the imaginary parts of a
- v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
- // multiply a_re * b
- v1 = vec_madd(v1, b.v, p4f_ZERO);
- // multiply a_im * b and get the conjugate result
- v2 = vec_madd(v2, b.v, p4f_ZERO);
- v2 = reinterpret_cast<Packet4f>(pxor(v2, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR)));
- // permute back to a proper order
- v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);
-
- return Packet2cf(padd<Packet4f>(v1, v2));
-}
-
template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v, b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v, b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v, b.v)); }
@@ -128,7 +175,7 @@ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::co
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
- std::complex<float> EIGEN_ALIGN16 res[2];
+ EIGEN_ALIGN16 std::complex<float> res[2];
pstore((float *)&res, a.v);
return res[0];
@@ -149,22 +196,6 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packe
return pfirst<Packet2cf>(Packet2cf(b));
}
-template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
-{
- Packet4f b1, b2;
-#ifdef _BIG_ENDIAN
- b1 = vec_sld(vecs[0].v, vecs[1].v, 8);
- b2 = vec_sld(vecs[1].v, vecs[0].v, 8);
-#else
- b1 = vec_sld(vecs[1].v, vecs[0].v, 8);
- b2 = vec_sld(vecs[0].v, vecs[1].v, 8);
-#endif
- b2 = vec_sld(b2, b2, 8);
- b2 = padd<Packet4f>(b1, b2);
-
- return Packet2cf(b2);
-}
-
template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
{
Packet4f b;
@@ -175,61 +206,12 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const P
return pfirst<Packet2cf>(prod);
}
-template<int Offset>
-struct palign_impl<Offset,Packet2cf>
-{
- static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
- {
- if (Offset==1)
- {
-#ifdef _BIG_ENDIAN
- first.v = vec_sld(first.v, second.v, 8);
-#else
- first.v = vec_sld(second.v, first.v, 8);
-#endif
- }
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
-
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
// TODO optimize it for AltiVec
- Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a, b);
+ Packet2cf res = pmul(a, pconj(b));
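+  // a/b is computed as a*conj(b) / |b|^2; the vec_perm below swaps the
+  // squared real/imag parts so their sum, the squared modulus, fills both
+  // lanes of the divisor.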
Packet4f s = pmul<Packet4f>(b.v, b.v);
return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));
}
@@ -246,6 +228,11 @@ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
kernel.packet[0].v = tmp;
}
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) {
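+  // vec_cmpeq compares component-wise, yielding [re(a)==re(b), im(a)==im(b)]
+  // per complex; AND-ing with the re/im-swapped mask makes a complex lane
+  // all-ones only when both components match.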
+ Packet4f eq = reinterpret_cast<Packet4f>(vec_cmpeq(a.v,b.v));
+ return Packet2cf(vec_and(eq, vec_perm(eq, eq, p16uc_COMPLEX32_REV)));
+}
+
#ifdef __VSX__
template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
Packet2cf result;
@@ -254,12 +241,62 @@ template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, con
}
#endif
+template<> EIGEN_STRONG_INLINE Packet2cf psqrt<Packet2cf>(const Packet2cf& a)
+{
+ return psqrt_complex<Packet2cf>(a);
+}
+
//---------- double ----------
#ifdef __VSX__
struct Packet1cd
{
EIGEN_STRONG_INLINE Packet1cd() {}
EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b)
+ {
+ Packet2d a_re, a_im, v1, v2;
+
+ // Permute and multiply the real parts of a and b
+ a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);
+ // Get the imaginary parts of a
+ a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);
+ // multiply a_re * b
+ v1 = vec_madd(a_re, b.v, p2d_ZERO);
+ // multiply a_im * b and get the conjugate result
+ v2 = vec_madd(a_im, b.v, p2d_ZERO);
+ v2 = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v2), reinterpret_cast<Packet4ui>(v2), 8));
+ v2 = pxor(v2, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR1));
+
+ return Packet1cd(padd<Packet2d>(v1, v2));
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) {
+ v = pmul(Packet1cd(*this), b).v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const {
+ return Packet1cd(*this) *= b;
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const {
+ return Packet1cd(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const {
+ return Packet1cd(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(void) const {
+ return Packet1cd(-v);
+ }
+
Packet2d v;
};
@@ -267,6 +304,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
{
typedef Packet1cd type;
typedef Packet1cd half;
+ typedef Packet2d as_real;
enum {
Vectorizable = 1,
AlignedOnScalar = 0,
@@ -286,7 +324,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet1cd half; typedef Packet2d as_real; };
template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { return Packet1cd(pload<Packet2d>((const double*)from)); }
template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { return Packet1cd(ploadu<Packet2d>((const double*)from)); }
@@ -296,19 +334,13 @@ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<
template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
-template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index)
{
- std::complex<double> EIGEN_ALIGN16 af[2];
- af[0] = from[0*stride];
- af[1] = from[1*stride];
- return pload<Packet1cd>(af);
+ return pload<Packet1cd>(from);
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index)
{
- std::complex<double> EIGEN_ALIGN16 af[2];
- pstore<std::complex<double> >(af, from);
- to[0*stride] = af[0];
- to[1*stride] = af[1];
+ pstore<std::complex<double> >(to, from);
}
template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }
@@ -316,24 +348,6 @@ template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, con
template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR2))); }
-template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
- Packet2d a_re, a_im, v1, v2;
-
- // Permute and multiply the real parts of a and b
- a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);
- // Get the imaginary parts of a
- a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);
- // multiply a_re * b
- v1 = vec_madd(a_re, b.v, p2d_ZERO);
- // multiply a_im * b and get the conjugate result
- v2 = vec_madd(a_im, b.v, p2d_ZERO);
- v2 = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v2), reinterpret_cast<Packet4ui>(v2), 8));
- v2 = pxor(v2, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR1));
-
- return Packet1cd(padd<Packet2d>(v1, v2));
-}
-
template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pand(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(por(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pxor(a.v,b.v)); }
@@ -345,7 +359,7 @@ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::c
template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
- std::complex<double> EIGEN_ALIGN16 res[2];
+ EIGEN_ALIGN16 std::complex<double> res[2];
pstore<std::complex<double> >(res, a);
return res[0];
@@ -354,59 +368,15 @@ template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Pac
template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
-template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs) { return vecs[0]; }
template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
-template<int Offset>
-struct palign_impl<Offset,Packet1cd>
-{
- static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
- {
- // FIXME is it sure we never have to align a Packet1cd?
- // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
-
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
// TODO optimize it for AltiVec
- Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+ Packet1cd res = pmul(a,pconj(b));
Packet2d s = pmul<Packet2d>(b.v, b.v);
return Packet1cd(pdiv(res.v, padd<Packet2d>(s, vec_perm(s, s, p16uc_REVERSE64))));
}
@@ -422,6 +392,23 @@ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
kernel.packet[0].v = tmp;
}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) {
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a)==re(b), im(a)==im(b)]
+ Packet2d eq = reinterpret_cast<Packet2d>(vec_cmpeq(a.v,b.v));
+ // Swap the real/imag elements of the mask to get:
+ // [im(a)==im(b), re(a)==re(b)]
+ Packet2d eq_swapped = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(eq), reinterpret_cast<Packet4ui>(eq), 8));
+ // Return re(a)==re(b) & im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cd(vec_and(eq, eq_swapped));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd psqrt<Packet1cd>(const Packet1cd& a)
+{
+ return psqrt_complex<Packet1cd>(a);
+}
+
#endif // __VSX__
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MathFunctions.h
index c5e4bede7..3a7a32936 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MathFunctions.h
@@ -9,10 +9,6 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/* The sin, cos, exp, and log functions of this file come from
- * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
- */
-
#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H
#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H
@@ -20,180 +16,28 @@ namespace Eigen {
namespace internal {
-static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
-static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-static _EIGEN_DECLARE_CONST_Packet4i(23, 23);
-
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
-/* the smallest non denormalized float number */
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f
-static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff);
-
-/* natural logarithm computed for 4 simultaneous float
- return NaN for x <= 0
-*/
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
-
-static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
-static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
-
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
-
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
-static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
-#ifdef __VSX__
-static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
-static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
-static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
-
-static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437);
-static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
-
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
-static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
-
-#ifdef __POWER8_VECTOR__
-static Packet2l p2l_1023 = { 1023, 1023 };
-static Packet2ul p2ul_52 = { 52, 52 };
-#endif
-
-#endif
-
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f plog<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
-
- Packet4i emm0;
-
- /* isvalid_mask is 0 if x < 0 or x is NaN. */
- Packet4ui isvalid_mask = reinterpret_cast<Packet4ui>(vec_cmpge(x, p4f_ZERO));
- Packet4ui iszero_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(x, p4f_ZERO));
-
- x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
- emm0 = vec_sr(reinterpret_cast<Packet4i>(x),
- reinterpret_cast<Packet4ui>(p4i_23));
-
- /* keep only the fractional part */
- x = pand(x, p4f_inv_mant_mask);
- x = por(x, p4f_half);
-
- emm0 = psub(emm0, p4i_0x7f);
- Packet4f e = padd(vec_ctf(emm0, 0), p4f_1);
-
- /* part2:
- if( x < SQRTHF ) {
- e -= 1;
- x = x + x - 1.0;
- } else { x = x - 1.0; }
- */
- Packet4f mask = reinterpret_cast<Packet4f>(vec_cmplt(x, p4f_cephes_SQRTHF));
- Packet4f tmp = pand(x, mask);
- x = psub(x, p4f_1);
- e = psub(e, pand(p4f_1, mask));
- x = padd(x, tmp);
-
- Packet4f x2 = pmul(x,x);
- Packet4f x3 = pmul(x2,x);
-
- Packet4f y, y1, y2;
- y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
- y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
- y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
- y = pmadd(y , x, p4f_cephes_log_p2);
- y1 = pmadd(y1, x, p4f_cephes_log_p5);
- y2 = pmadd(y2, x, p4f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- y1 = pmul(e, p4f_cephes_log_q1);
- tmp = pmul(x2, p4f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p4f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
- // negative arg will be NAN, 0 will be -INF
- x = vec_sel(x, p4f_minus_inf, iszero_mask);
- x = vec_sel(p4f_minus_nan, x, isvalid_mask);
- return x;
+ return plog_float(_x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pexp<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
-
- Packet4f tmp, fx;
- Packet4i emm0;
-
- // clamp x
- x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
-
- // express exp(x) as exp(g + n*log(2))
- fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
-
- fx = pfloor(fx);
-
- tmp = pmul(fx, p4f_cephes_exp_C1);
- Packet4f z = pmul(fx, p4f_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- z = pmul(x,x);
-
- Packet4f y = p4f_cephes_exp_p0;
- y = pmadd(y, x, p4f_cephes_exp_p1);
- y = pmadd(y, x, p4f_cephes_exp_p2);
- y = pmadd(y, x, p4f_cephes_exp_p3);
- y = pmadd(y, x, p4f_cephes_exp_p4);
- y = pmadd(y, x, p4f_cephes_exp_p5);
- y = pmadd(y, z, x);
- y = padd(y, p4f_1);
+ return pexp_float(_x);
+}
- // build 2^n
- emm0 = vec_cts(fx, 0);
- emm0 = vec_add(emm0, p4i_0x7f);
- emm0 = vec_sl(emm0, reinterpret_cast<Packet4ui>(p4i_23));
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& _x)
+{
+ return psin_float(_x);
+}
- // Altivec's max & min operators just drop silent NaNs. Check NaNs in
- // inputs and return them unmodified.
- Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x));
- return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x),
- isnumber_mask);
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& _x)
+{
+ return pcos_float(_x);
}
#ifndef EIGEN_COMP_CLANG
@@ -225,95 +69,19 @@ Packet2d psqrt<Packet2d>(const Packet2d& x)
return vec_sqrt(x);
}
-// VSX support varies between different compilers and even different
-// versions of the same compiler. For gcc version >= 4.9.3, we can use
-// vec_cts to efficiently convert Packet2d to Packet2l. Otherwise, use
-// a slow version that works with older compilers.
-// Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles
-// are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963
-static inline Packet2l ConvertToPacket2l(const Packet2d& x) {
-#if EIGEN_GNUC_AT_LEAST(5, 4) || \
- (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1)
- return vec_cts(x, 0); // TODO: check clang version.
-#else
- double tmp[2];
- memcpy(tmp, &x, sizeof(tmp));
- Packet2l l = { static_cast<long long>(tmp[0]),
- static_cast<long long>(tmp[1]) };
- return l;
-#endif
-}
-
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d pexp<Packet2d>(const Packet2d& _x)
{
- Packet2d x = _x;
-
- Packet2d tmp, fx;
- Packet2l emm0;
-
- // clamp x
- x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
-
- /* express exp(x) as exp(g + n*log(2)) */
- fx = pmadd(x, p2d_cephes_LOG2EF, p2d_half);
-
- fx = pfloor(fx);
-
- tmp = pmul(fx, p2d_cephes_exp_C1);
- Packet2d z = pmul(fx, p2d_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- Packet2d x2 = pmul(x,x);
-
- Packet2d px = p2d_cephes_exp_p0;
- px = pmadd(px, x2, p2d_cephes_exp_p1);
- px = pmadd(px, x2, p2d_cephes_exp_p2);
- px = pmul (px, x);
-
- Packet2d qx = p2d_cephes_exp_q0;
- qx = pmadd(qx, x2, p2d_cephes_exp_q1);
- qx = pmadd(qx, x2, p2d_cephes_exp_q2);
- qx = pmadd(qx, x2, p2d_cephes_exp_q3);
-
- x = pdiv(px,psub(qx,px));
- x = pmadd(p2d_2,x,p2d_1);
-
- // build 2^n
- emm0 = ConvertToPacket2l(fx);
-
-#ifdef __POWER8_VECTOR__
- emm0 = vec_add(emm0, p2l_1023);
- emm0 = vec_sl(emm0, p2ul_52);
-#else
- // Code is a bit complex for POWER7. There is actually a
- // vec_xxsldi intrinsic but it is not supported by some gcc versions.
- // So we shift (52-32) bits and do a word swap with zeros.
- _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);
- _EIGEN_DECLARE_CONST_Packet4i(20, 20); // 52 - 32
-
- Packet4i emm04i = reinterpret_cast<Packet4i>(emm0);
- emm04i = vec_add(emm04i, p4i_1023);
- emm04i = vec_sl(emm04i, reinterpret_cast<Packet4ui>(p4i_20));
- static const Packet16uc perm = {
- 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b };
-#ifdef _BIG_ENDIAN
- emm0 = reinterpret_cast<Packet2l>(vec_perm(p4i_ZERO, emm04i, perm));
-#else
- emm0 = reinterpret_cast<Packet2l>(vec_perm(emm04i, p4i_ZERO, perm));
-#endif
-
+ return pexp_double(_x);
+}
#endif
- // Altivec's max & min operators just drop silent NaNs. Check NaNs in
- // inputs and return them unmodified.
- Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));
- return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),
- isnumber_mask);
+// Hyperbolic Tangent function.
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+ptanh<Packet4f>(const Packet4f& x) {
+ return internal::generic_fast_tanh_float(x);
}
-#endif
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProduct.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProduct.h
new file mode 100644
index 000000000..8feb88ea7
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProduct.h
@@ -0,0 +1,2765 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020 Everton Constantino (everton.constantino@ibm.com)
+// Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_PRODUCT_ALTIVEC_H
+#define EIGEN_MATRIX_PRODUCT_ALTIVEC_H
+
+#ifndef EIGEN_ALTIVEC_USE_CUSTOM_PACK
+#define EIGEN_ALTIVEC_USE_CUSTOM_PACK 1
+#endif
+
+#include "MatrixProductCommon.h"
+
+// LLVM does not support dynamic dispatch, so force either MMA or VSX to always be used
+#if EIGEN_COMP_LLVM
+#if !defined(EIGEN_ALTIVEC_DISABLE_MMA) && !defined(EIGEN_ALTIVEC_MMA_ONLY)
+#ifdef __MMA__
+#define EIGEN_ALTIVEC_MMA_ONLY
+#else
+#define EIGEN_ALTIVEC_DISABLE_MMA
+#endif
+#endif
+#endif
+
+#ifdef __has_builtin
+#if __has_builtin(__builtin_mma_assemble_acc)
+ #define ALTIVEC_MMA_SUPPORT
+#endif
+#endif
+
+#if defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ #include "MatrixProductMMA.h"
+#endif
+
+/**************************************************************************************************
+ * TODO *
+ * - Check StorageOrder on dhs_pack (the second innermost loop appears unvectorized even though it could be). *
+ * - Check the possibility of transposing via GETREAL and GETIMAG when needed. *
+ **************************************************************************************************/
+namespace Eigen {
+
+namespace internal {
+
+/**************************
+ * Constants and typedefs *
+ **************************/
+template<typename Scalar>
+struct quad_traits
+{
+ typedef typename packet_traits<Scalar>::type vectortype;
+ typedef PacketBlock<vectortype,4> type;
+ typedef vectortype rhstype;
+ enum
+ {
+ vectorsize = packet_traits<Scalar>::size,
+ size = 4,
+ rows = 4
+ };
+};
+
+template<>
+struct quad_traits<double>
+{
+ typedef Packet2d vectortype;
+ typedef PacketBlock<vectortype,4> type;
+ typedef PacketBlock<Packet2d,2> rhstype;
+ enum
+ {
+ vectorsize = packet_traits<double>::size,
+ size = 2,
+ rows = 4
+ };
+};
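+// A Packet2d holds only 2 doubles, so the rhs side of the 4-wide micro-kernel
+// is carried as a pair of packets (PacketBlock<Packet2d,2>) instead of a
+// single vector.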
+
+// MatrixProduct decomposes complex data into a separate real vector and imaginary vector; this
+// turned out to be faster than Eigen's usual approach of keeping real/imaginary pairs in a single
+// vector. The constants below extract the parts and convert between Eigen's layout and the
+// MatrixProduct one.
+
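+//[a,ai],[b,bi],[c,ci],[d,di] = [a,b,c,d]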
+const static Packet16uc p16uc_GETREAL32 = { 0, 1, 2, 3,
+ 8, 9, 10, 11,
+ 16, 17, 18, 19,
+ 24, 25, 26, 27};
+
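+//[a,ai],[b,bi],[c,ci],[d,di] = [ai,bi,ci,di]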
+const static Packet16uc p16uc_GETIMAG32 = { 4, 5, 6, 7,
+ 12, 13, 14, 15,
+ 20, 21, 22, 23,
+ 28, 29, 30, 31};
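+//[a,ai],[b,bi] = [a,b]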
+const static Packet16uc p16uc_GETREAL64 = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 16, 17, 18, 19, 20, 21, 22, 23};
+
+//[a,ai],[b,bi] = [ai,bi]
+const static Packet16uc p16uc_GETIMAG64 = { 8, 9, 10, 11, 12, 13, 14, 15,
+ 24, 25, 26, 27, 28, 29, 30, 31};
+
+/*********************************************
+ * Single precision real and complex packing *
+ *********************************************/
+
+/**
+ * Symm packing packs self-adjoint blocks: as expected, the packing leaves the diagonal real, and
+ * whatever lies below the diagonal is copied from the corresponding element above it and
+ * conjugated. There is no PanelMode available for symm packing.
+ *
+ * Packing in general is supposed to leave the lhs and rhs blocks in a layout that gemm can read
+ * efficiently with its respective rank-update instructions. The float32/64 versions differ because,
+ * at the moment, the accumulator size is fixed at 512 bits, so a 4x4 accumulator of 64-bit
+ * elements is not possible.
+ *
+ * As mentioned earlier, MatrixProduct splits complex numbers into a real vector and an imaginary
+ * vector, and packing has to take that into account: at the moment we pack the real part first and
+ * then the imaginary part. This is the main reason why complex packing is broken down into several
+ * different parts, and also why we end up with separate float32/64 and complex float32/64 versions.
+ **/
+template<typename Scalar, typename Index, int StorageOrder>
+EIGEN_ALWAYS_INLINE std::complex<Scalar> getAdjointVal(Index i, Index j, const_blas_data_mapper<std::complex<Scalar>, Index, StorageOrder>& dt)
+{
+ std::complex<Scalar> v;
+ if(i < j)
+ {
+ v.real( dt(j,i).real());
+ v.imag(-dt(j,i).imag());
+ } else if(i > j)
+ {
+ v.real( dt(i,j).real());
+ v.imag( dt(i,j).imag());
+ } else {
+ v.real( dt(i,j).real());
+ v.imag((Scalar)0.0);
+ }
+ return v;
+}
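+// For example, with the self-adjoint data stored in the lower triangle,
+// getAdjointVal(0, 1, dt) returns conj(dt(1, 0)), and diagonal entries are
+// forced to be purely real.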
+
+template<typename Scalar, typename Index, int StorageOrder, int N>
+EIGEN_STRONG_INLINE void symm_pack_complex_rhs_helper(std::complex<Scalar>* blockB, const std::complex<Scalar>* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+{
+ const Index depth = k2 + rows;
+ const_blas_data_mapper<std::complex<Scalar>, Index, StorageOrder> rhs(_rhs, rhsStride);
+ const Index vectorSize = N*quad_traits<Scalar>::vectorsize;
+ const Index vectorDelta = vectorSize * rows;
+ Scalar* blockBf = reinterpret_cast<Scalar *>(blockB);
+
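+  // rir indexes the real half of the packed panel and rii the imaginary
+  // half; within one column panel the two halves are vectorDelta scalars
+  // apart.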
+ Index rir = 0, rii, j = 0;
+ for(; j + vectorSize <= cols; j+=vectorSize)
+ {
+ rii = rir + vectorDelta;
+
+ for(Index i = k2; i < depth; i++)
+ {
+ for(Index k = 0; k < vectorSize; k++)
+ {
+ std::complex<Scalar> v = getAdjointVal<Scalar, Index, StorageOrder>(i, j + k, rhs);
+
+ blockBf[rir + k] = v.real();
+ blockBf[rii + k] = v.imag();
+ }
+ rir += vectorSize;
+ rii += vectorSize;
+ }
+
+ rir += vectorDelta;
+ }
+
+ for(; j < cols; j++)
+ {
+ rii = rir + rows;
+
+ for(Index i = k2; i < depth; i++)
+ {
+ std::complex<Scalar> v = getAdjointVal<Scalar, Index, StorageOrder>(i, j, rhs);
+
+ blockBf[rir] = v.real();
+ blockBf[rii] = v.imag();
+
+ rir += 1;
+ rii += 1;
+ }
+
+ rir += rows;
+ }
+}
+
+template<typename Scalar, typename Index, int StorageOrder>
+EIGEN_STRONG_INLINE void symm_pack_complex_lhs_helper(std::complex<Scalar>* blockA, const std::complex<Scalar>* _lhs, Index lhsStride, Index cols, Index rows)
+{
+ const Index depth = cols;
+ const_blas_data_mapper<std::complex<Scalar>, Index, StorageOrder> lhs(_lhs, lhsStride);
+ const Index vectorSize = quad_traits<Scalar>::vectorsize;
+ const Index vectorDelta = vectorSize * depth;
+ Scalar* blockAf = reinterpret_cast<Scalar *>(blockA);
+
+ Index rir = 0, rii, j = 0;
+ for(; j + vectorSize <= rows; j+=vectorSize)
+ {
+ rii = rir + vectorDelta;
+
+ for(Index i = 0; i < depth; i++)
+ {
+ for(Index k = 0; k < vectorSize; k++)
+ {
+ std::complex<Scalar> v = getAdjointVal<Scalar, Index, StorageOrder>(j+k, i, lhs);
+
+ blockAf[rir + k] = v.real();
+ blockAf[rii + k] = v.imag();
+ }
+ rir += vectorSize;
+ rii += vectorSize;
+ }
+
+ rir += vectorDelta;
+ }
+
+ if (j < rows)
+ {
+ rii = rir + ((rows - j) * depth);
+
+ for(Index i = 0; i < depth; i++)
+ {
+ Index k = j;
+ for(; k < rows; k++)
+ {
+ std::complex<Scalar> v = getAdjointVal<Scalar, Index, StorageOrder>(k, i, lhs);
+
+ blockAf[rir] = v.real();
+ blockAf[rii] = v.imag();
+
+ rir += 1;
+ rii += 1;
+ }
+ }
+ }
+}
+
+template<typename Scalar, typename Index, int StorageOrder, int N>
+EIGEN_STRONG_INLINE void symm_pack_rhs_helper(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+{
+ const Index depth = k2 + rows;
+ const_blas_data_mapper<Scalar, Index, StorageOrder> rhs(_rhs, rhsStride);
+ const Index vectorSize = quad_traits<Scalar>::vectorsize;
+
+ Index ri = 0, j = 0;
+ for(; j + N*vectorSize <= cols; j+=N*vectorSize)
+ {
+ Index i = k2;
+ for(; i < depth; i++)
+ {
+ for(Index k = 0; k < N*vectorSize; k++)
+ {
+ if(i <= j+k)
+ blockB[ri + k] = rhs(j+k, i);
+ else
+ blockB[ri + k] = rhs(i, j+k);
+ }
+ ri += N*vectorSize;
+ }
+ }
+
+ for(; j < cols; j++)
+ {
+ for(Index i = k2; i < depth; i++)
+ {
+ if(j <= i)
+ blockB[ri] = rhs(i, j);
+ else
+ blockB[ri] = rhs(j, i);
+ ri += 1;
+ }
+ }
+}
+
+template<typename Scalar, typename Index, int StorageOrder>
+EIGEN_STRONG_INLINE void symm_pack_lhs_helper(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
+{
+ const Index depth = cols;
+ const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs, lhsStride);
+ const Index vectorSize = quad_traits<Scalar>::vectorsize;
+
+ Index ri = 0, j = 0;
+ for(; j + vectorSize <= rows; j+=vectorSize)
+ {
+ Index i = 0;
+
+ for(; i < depth; i++)
+ {
+ for(Index k = 0; k < vectorSize; k++)
+ {
+ if(i <= j+k)
+ blockA[ri + k] = lhs(j+k, i);
+ else
+ blockA[ri + k] = lhs(i, j+k);
+ }
+ ri += vectorSize;
+ }
+ }
+
+ if (j < rows)
+ {
+ for(Index i = 0; i < depth; i++)
+ {
+ Index k = j;
+ for(; k < rows; k++)
+ {
+ if(i <= k)
+ blockA[ri] = lhs(k, i);
+ else
+ blockA[ri] = lhs(i, k);
+ ri += 1;
+ }
+ }
+ }
+}
+
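+// *********** symm_pack std::complex<float32> ***********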
+template<typename Index, int nr, int StorageOrder>
+struct symm_pack_rhs<std::complex<float>, Index, nr, StorageOrder>
+{
+ void operator()(std::complex<float>* blockB, const std::complex<float>* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+ {
+ symm_pack_complex_rhs_helper<float, Index, StorageOrder, 1>(blockB, _rhs, rhsStride, rows, cols, k2);
+ }
+};
+
+template<typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
+struct symm_pack_lhs<std::complex<float>, Index, Pack1, Pack2_dummy, StorageOrder>
+{
+ void operator()(std::complex<float>* blockA, const std::complex<float>* _lhs, Index lhsStride, Index cols, Index rows)
+ {
+ symm_pack_complex_lhs_helper<float, Index, StorageOrder>(blockA, _lhs, lhsStride, cols, rows);
+ }
+};
+
+// *********** symm_pack std::complex<float64> ***********
+
+template<typename Index, int nr, int StorageOrder>
+struct symm_pack_rhs<std::complex<double>, Index, nr, StorageOrder>
+{
+ void operator()(std::complex<double>* blockB, const std::complex<double>* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+ {
+ symm_pack_complex_rhs_helper<double, Index, StorageOrder, 2>(blockB, _rhs, rhsStride, rows, cols, k2);
+ }
+};
+
+template<typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
+struct symm_pack_lhs<std::complex<double>, Index, Pack1, Pack2_dummy, StorageOrder>
+{
+ void operator()(std::complex<double>* blockA, const std::complex<double>* _lhs, Index lhsStride, Index cols, Index rows)
+ {
+ symm_pack_complex_lhs_helper<double, Index, StorageOrder>(blockA, _lhs, lhsStride, cols, rows);
+ }
+};
+
+// *********** symm_pack float32 ***********
+template<typename Index, int nr, int StorageOrder>
+struct symm_pack_rhs<float, Index, nr, StorageOrder>
+{
+ void operator()(float* blockB, const float* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+ {
+ symm_pack_rhs_helper<float, Index, StorageOrder, 1>(blockB, _rhs, rhsStride, rows, cols, k2);
+ }
+};
+
+template<typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
+struct symm_pack_lhs<float, Index, Pack1, Pack2_dummy, StorageOrder>
+{
+ void operator()(float* blockA, const float* _lhs, Index lhsStride, Index cols, Index rows)
+ {
+ symm_pack_lhs_helper<float, Index, StorageOrder>(blockA, _lhs, lhsStride, cols, rows);
+ }
+};
+
+// *********** symm_pack float64 ***********
+template<typename Index, int nr, int StorageOrder>
+struct symm_pack_rhs<double, Index, nr, StorageOrder>
+{
+ void operator()(double* blockB, const double* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+ {
+ symm_pack_rhs_helper<double, Index, StorageOrder, 2>(blockB, _rhs, rhsStride, rows, cols, k2);
+ }
+};
+
+template<typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
+struct symm_pack_lhs<double, Index, Pack1, Pack2_dummy, StorageOrder>
+{
+ void operator()(double* blockA, const double* _lhs, Index lhsStride, Index cols, Index rows)
+ {
+ symm_pack_lhs_helper<double, Index, StorageOrder>(blockA, _lhs, lhsStride, cols, rows);
+ }
+};
+
+/**
+ * PanelMode
+ * Packing might be called several times before the block is multiplied by gebp_kernel; this happens
+ * because on special occasions it fills parts of a block with other parts of the matrix. Two variables
+ * control how PanelMode should behave: offset and stride. The idea is that these variables represent
+ * the offset and stride the block will really have later, and that is what packing must obey. The
+ * process is to behave as with normal packing, but to start each part at the correct offset and to end
+ * it respecting the real stride the block will have. Gebp is aware of both the block's stride and
+ * offset and behaves accordingly.
+ **/
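+
+// A worked example of the PanelMode bookkeeping (illustrative values, not from
+// any particular caller): with vectorSize = 4, stride = 8, offset = 2 and
+// depth = 5, a panel in dhs_pack starts at ri = vectorSize*offset = 8, the
+// inner loops advance ri by vectorSize*depth = 20, and the final correction
+// vectorSize*(stride - offset - depth) = 4 lands ri on the next panel boundary
+// at vectorSize*stride = 32, exactly the stride gebp expects.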
+
+template<typename Scalar, typename Packet, typename Index, int N>
+EIGEN_ALWAYS_INLINE void storeBlock(Scalar* to, PacketBlock<Packet,N>& block)
+{
+ const Index size = 16 / sizeof(Scalar);
+ pstore<Scalar>(to + (0 * size), block.packet[0]);
+ pstore<Scalar>(to + (1 * size), block.packet[1]);
+ if (N > 2) {
+ pstore<Scalar>(to + (2 * size), block.packet[2]);
+ }
+ if (N > 3) {
+ pstore<Scalar>(to + (3 * size), block.packet[3]);
+ }
+}
+
+// General template for lhs & rhs complex packing.
+template<typename Scalar, typename Index, typename DataMapper, typename Packet, typename PacketC, int StorageOrder, bool Conjugate, bool PanelMode, bool UseLhs>
+struct dhs_cpack {
+ EIGEN_STRONG_INLINE void operator()(std::complex<Scalar>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+ {
+ const Index vectorSize = quad_traits<Scalar>::vectorsize;
+ const Index vectorDelta = vectorSize * ((PanelMode) ? stride : depth);
+ Index rir = ((PanelMode) ? (vectorSize*offset) : 0), rii;
+ Scalar* blockAt = reinterpret_cast<Scalar *>(blockA);
+ Index j = 0;
+
+ for(; j + vectorSize <= rows; j+=vectorSize)
+ {
+ Index i = 0;
+
+ rii = rir + vectorDelta;
+
+ for(; i + vectorSize <= depth; i+=vectorSize)
+ {
+ PacketBlock<Packet,4> blockr, blocki;
+ PacketBlock<PacketC,8> cblock;
+
+ if (UseLhs) {
+ bload<DataMapper, PacketC, Index, 2, StorageOrder, true, 4>(cblock, lhs, j, i);
+ } else {
+ bload<DataMapper, PacketC, Index, 2, StorageOrder, true, 4>(cblock, lhs, i, j);
+ }
+
+ blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[4].v, p16uc_GETREAL32);
+ blockr.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[5].v, p16uc_GETREAL32);
+ blockr.packet[2] = vec_perm(cblock.packet[2].v, cblock.packet[6].v, p16uc_GETREAL32);
+ blockr.packet[3] = vec_perm(cblock.packet[3].v, cblock.packet[7].v, p16uc_GETREAL32);
+
+ blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[4].v, p16uc_GETIMAG32);
+ blocki.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[5].v, p16uc_GETIMAG32);
+ blocki.packet[2] = vec_perm(cblock.packet[2].v, cblock.packet[6].v, p16uc_GETIMAG32);
+ blocki.packet[3] = vec_perm(cblock.packet[3].v, cblock.packet[7].v, p16uc_GETIMAG32);
+
+ if(Conjugate)
+ {
+ blocki.packet[0] = -blocki.packet[0];
+ blocki.packet[1] = -blocki.packet[1];
+ blocki.packet[2] = -blocki.packet[2];
+ blocki.packet[3] = -blocki.packet[3];
+ }
+
+        if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs))
+ {
+ ptranspose(blockr);
+ ptranspose(blocki);
+ }
+
+ storeBlock<Scalar, Packet, Index, 4>(blockAt + rir, blockr);
+ storeBlock<Scalar, Packet, Index, 4>(blockAt + rii, blocki);
+
+ rir += 4*vectorSize;
+ rii += 4*vectorSize;
+ }
+ for(; i < depth; i++)
+ {
+ PacketBlock<Packet,1> blockr, blocki;
+ PacketBlock<PacketC,2> cblock;
+
+        if(((StorageOrder == ColMajor) && UseLhs) || ((StorageOrder == RowMajor) && !UseLhs))
+ {
+ if (UseLhs) {
+ cblock.packet[0] = lhs.template loadPacket<PacketC>(j + 0, i);
+ cblock.packet[1] = lhs.template loadPacket<PacketC>(j + 2, i);
+ } else {
+ cblock.packet[0] = lhs.template loadPacket<PacketC>(i, j + 0);
+ cblock.packet[1] = lhs.template loadPacket<PacketC>(i, j + 2);
+ }
+ } else {
+ if (UseLhs) {
+ cblock.packet[0] = pload2(lhs(j + 0, i), lhs(j + 1, i));
+ cblock.packet[1] = pload2(lhs(j + 2, i), lhs(j + 3, i));
+ } else {
+ cblock.packet[0] = pload2(lhs(i, j + 0), lhs(i, j + 1));
+ cblock.packet[1] = pload2(lhs(i, j + 2), lhs(i, j + 3));
+ }
+ }
+
+ blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL32);
+ blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG32);
+
+ if(Conjugate)
+ {
+ blocki.packet[0] = -blocki.packet[0];
+ }
+
+ pstore<Scalar>(blockAt + rir, blockr.packet[0]);
+ pstore<Scalar>(blockAt + rii, blocki.packet[0]);
+
+ rir += vectorSize;
+ rii += vectorSize;
+ }
+
+ rir += ((PanelMode) ? (vectorSize*(2*stride - depth)) : vectorDelta);
+ }
+
+ if (!UseLhs)
+ {
+ if(PanelMode) rir -= (offset*(vectorSize - 1));
+
+ for(; j < rows; j++)
+ {
+ rii = rir + ((PanelMode) ? stride : depth);
+
+ for(Index i = 0; i < depth; i++)
+ {
+ blockAt[rir] = lhs(i, j).real();
+
+ if(Conjugate)
+ blockAt[rii] = -lhs(i, j).imag();
+ else
+ blockAt[rii] = lhs(i, j).imag();
+
+ rir += 1;
+ rii += 1;
+ }
+
+ rir += ((PanelMode) ? (2*stride - depth) : depth);
+ }
+ } else {
+ if (j < rows)
+ {
+ if(PanelMode) rir += (offset*(rows - j - vectorSize));
+ rii = rir + (((PanelMode) ? stride : depth) * (rows - j));
+
+ for(Index i = 0; i < depth; i++)
+ {
+ Index k = j;
+ for(; k < rows; k++)
+ {
+ blockAt[rir] = lhs(k, i).real();
+
+ if(Conjugate)
+ blockAt[rii] = -lhs(k, i).imag();
+ else
+ blockAt[rii] = lhs(k, i).imag();
+
+ rir += 1;
+ rii += 1;
+ }
+ }
+ }
+ }
+ }
+};
+
+// General template for lhs & rhs packing.
+template<typename Scalar, typename Index, typename DataMapper, typename Packet, int StorageOrder, bool PanelMode, bool UseLhs>
+struct dhs_pack {
+ EIGEN_STRONG_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+ {
+ const Index vectorSize = quad_traits<Scalar>::vectorsize;
+ Index ri = 0, j = 0;
+
+ for(; j + vectorSize <= rows; j+=vectorSize)
+ {
+ Index i = 0;
+
+ if(PanelMode) ri += vectorSize*offset;
+
+ for(; i + vectorSize <= depth; i+=vectorSize)
+ {
+ PacketBlock<Packet,4> block;
+
+ if (UseLhs) {
+ bload<DataMapper, Packet, Index, 4, StorageOrder, false, 4>(block, lhs, j, i);
+ } else {
+ bload<DataMapper, Packet, Index, 4, StorageOrder, false, 4>(block, lhs, i, j);
+ }
+ if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs))
+ {
+ ptranspose(block);
+ }
+
+ storeBlock<Scalar, Packet, Index, 4>(blockA + ri, block);
+
+ ri += 4*vectorSize;
+ }
+ for(; i < depth; i++)
+ {
+ if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs))
+ {
+ if (UseLhs) {
+ blockA[ri+0] = lhs(j+0, i);
+ blockA[ri+1] = lhs(j+1, i);
+ blockA[ri+2] = lhs(j+2, i);
+ blockA[ri+3] = lhs(j+3, i);
+ } else {
+ blockA[ri+0] = lhs(i, j+0);
+ blockA[ri+1] = lhs(i, j+1);
+ blockA[ri+2] = lhs(i, j+2);
+ blockA[ri+3] = lhs(i, j+3);
+ }
+ } else {
+ Packet lhsV;
+ if (UseLhs) {
+ lhsV = lhs.template loadPacket<Packet>(j, i);
+ } else {
+ lhsV = lhs.template loadPacket<Packet>(i, j);
+ }
+ pstore<Scalar>(blockA + ri, lhsV);
+ }
+
+ ri += vectorSize;
+ }
+
+ if(PanelMode) ri += vectorSize*(stride - offset - depth);
+ }
+
+ if (!UseLhs)
+ {
+ if(PanelMode) ri += offset;
+
+ for(; j < rows; j++)
+ {
+ for(Index i = 0; i < depth; i++)
+ {
+ blockA[ri] = lhs(i, j);
+ ri += 1;
+ }
+
+ if(PanelMode) ri += stride - depth;
+ }
+ } else {
+ if (j < rows)
+ {
+ if(PanelMode) ri += offset*(rows - j);
+
+ for(Index i = 0; i < depth; i++)
+ {
+ Index k = j;
+ for(; k < rows; k++)
+ {
+ blockA[ri] = lhs(k, i);
+ ri += 1;
+ }
+ }
+ }
+ }
+ }
+};
+
+// Lhs packing, float64 specialization.
+template<typename Index, typename DataMapper, int StorageOrder, bool PanelMode>
+struct dhs_pack<double, Index, DataMapper, Packet2d, StorageOrder, PanelMode, true>
+{
+ EIGEN_STRONG_INLINE void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+ {
+ const Index vectorSize = quad_traits<double>::vectorsize;
+ Index ri = 0, j = 0;
+
+ for(; j + vectorSize <= rows; j+=vectorSize)
+ {
+ Index i = 0;
+
+ if(PanelMode) ri += vectorSize*offset;
+
+ for(; i + vectorSize <= depth; i+=vectorSize)
+ {
+ PacketBlock<Packet2d,2> block;
+ if(StorageOrder == RowMajor)
+ {
+ block.packet[0] = lhs.template loadPacket<Packet2d>(j + 0, i);
+ block.packet[1] = lhs.template loadPacket<Packet2d>(j + 1, i);
+
+ ptranspose(block);
+ } else {
+ block.packet[0] = lhs.template loadPacket<Packet2d>(j, i + 0);
+ block.packet[1] = lhs.template loadPacket<Packet2d>(j, i + 1);
+ }
+
+ storeBlock<double, Packet2d, Index, 2>(blockA + ri, block);
+
+ ri += 2*vectorSize;
+ }
+ for(; i < depth; i++)
+ {
+ if(StorageOrder == RowMajor)
+ {
+ blockA[ri+0] = lhs(j+0, i);
+ blockA[ri+1] = lhs(j+1, i);
+ } else {
+ Packet2d lhsV = lhs.template loadPacket<Packet2d>(j, i);
+ pstore<double>(blockA + ri, lhsV);
+ }
+
+ ri += vectorSize;
+ }
+
+ if(PanelMode) ri += vectorSize*(stride - offset - depth);
+ }
+
+ if (j < rows)
+ {
+ if(PanelMode) ri += offset*(rows - j);
+
+ for(Index i = 0; i < depth; i++)
+ {
+ Index k = j;
+ for(; k < rows; k++)
+ {
+ blockA[ri] = lhs(k, i);
+ ri += 1;
+ }
+ }
+ }
+ }
+};
+
+// Rhs packing, float64 specialization.
+template<typename Index, typename DataMapper, int StorageOrder, bool PanelMode>
+struct dhs_pack<double, Index, DataMapper, Packet2d, StorageOrder, PanelMode, false>
+{
+ EIGEN_STRONG_INLINE void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+ {
+ const Index vectorSize = quad_traits<double>::vectorsize;
+ Index ri = 0, j = 0;
+
+ for(; j + 2*vectorSize <= cols; j+=2*vectorSize)
+ {
+ Index i = 0;
+
+ if(PanelMode) ri += offset*(2*vectorSize);
+
+ for(; i + vectorSize <= depth; i+=vectorSize)
+ {
+ PacketBlock<Packet2d,4> block;
+ if(StorageOrder == ColMajor)
+ {
+ PacketBlock<Packet2d,2> block1, block2;
+ block1.packet[0] = rhs.template loadPacket<Packet2d>(i, j + 0);
+ block1.packet[1] = rhs.template loadPacket<Packet2d>(i, j + 1);
+ block2.packet[0] = rhs.template loadPacket<Packet2d>(i, j + 2);
+ block2.packet[1] = rhs.template loadPacket<Packet2d>(i, j + 3);
+
+ ptranspose(block1);
+ ptranspose(block2);
+
+ pstore<double>(blockB + ri , block1.packet[0]);
+ pstore<double>(blockB + ri + 2, block2.packet[0]);
+ pstore<double>(blockB + ri + 4, block1.packet[1]);
+ pstore<double>(blockB + ri + 6, block2.packet[1]);
+ } else {
+ block.packet[0] = rhs.template loadPacket<Packet2d>(i + 0, j + 0); //[a1 a2]
+ block.packet[1] = rhs.template loadPacket<Packet2d>(i + 0, j + 2); //[a3 a4]
+ block.packet[2] = rhs.template loadPacket<Packet2d>(i + 1, j + 0); //[b1 b2]
+ block.packet[3] = rhs.template loadPacket<Packet2d>(i + 1, j + 2); //[b3 b4]
+
+ storeBlock<double, Packet2d, Index, 4>(blockB + ri, block);
+ }
+
+ ri += 4*vectorSize;
+ }
+ for(; i < depth; i++)
+ {
+ if(StorageOrder == ColMajor)
+ {
+ blockB[ri+0] = rhs(i, j+0);
+ blockB[ri+1] = rhs(i, j+1);
+
+ ri += vectorSize;
+
+ blockB[ri+0] = rhs(i, j+2);
+ blockB[ri+1] = rhs(i, j+3);
+ } else {
+ Packet2d rhsV = rhs.template loadPacket<Packet2d>(i, j);
+ pstore<double>(blockB + ri, rhsV);
+
+ ri += vectorSize;
+
+ rhsV = rhs.template loadPacket<Packet2d>(i, j + 2);
+ pstore<double>(blockB + ri, rhsV);
+ }
+ ri += vectorSize;
+ }
+
+ if(PanelMode) ri += (2*vectorSize)*(stride - offset - depth);
+ }
+
+ if(PanelMode) ri += offset;
+
+ for(; j < cols; j++)
+ {
+ for(Index i = 0; i < depth; i++)
+ {
+ blockB[ri] = rhs(i, j);
+ ri += 1;
+ }
+
+ if(PanelMode) ri += stride - depth;
+ }
+ }
+};
+
+// Lhs complex packing, float64 specialization.
+template<typename Index, typename DataMapper, typename Packet, typename PacketC, int StorageOrder, bool Conjugate, bool PanelMode>
+struct dhs_cpack<double, Index, DataMapper, Packet, PacketC, StorageOrder, Conjugate, PanelMode, true>
+{
+ EIGEN_STRONG_INLINE void operator()(std::complex<double>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+ {
+ const Index vectorSize = quad_traits<double>::vectorsize;
+ const Index vectorDelta = vectorSize * ((PanelMode) ? stride : depth);
+ Index rir = ((PanelMode) ? (vectorSize*offset) : 0), rii;
+ double* blockAt = reinterpret_cast<double *>(blockA);
+ Index j = 0;
+
+ for(; j + vectorSize <= rows; j+=vectorSize)
+ {
+ Index i = 0;
+
+ rii = rir + vectorDelta;
+
+ for(; i + vectorSize <= depth; i+=vectorSize)
+ {
+ PacketBlock<Packet,2> blockr, blocki;
+ PacketBlock<PacketC,4> cblock;
+
+ if(StorageOrder == ColMajor)
+ {
+ cblock.packet[0] = lhs.template loadPacket<PacketC>(j, i + 0); //[a1 a1i]
+ cblock.packet[1] = lhs.template loadPacket<PacketC>(j, i + 1); //[b1 b1i]
+
+ cblock.packet[2] = lhs.template loadPacket<PacketC>(j + 1, i + 0); //[a2 a2i]
+ cblock.packet[3] = lhs.template loadPacket<PacketC>(j + 1, i + 1); //[b2 b2i]
+
+ blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[2].v, p16uc_GETREAL64); //[a1 a2]
+ blockr.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[3].v, p16uc_GETREAL64); //[b1 b2]
+
+ blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[2].v, p16uc_GETIMAG64);
+ blocki.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[3].v, p16uc_GETIMAG64);
+ } else {
+ cblock.packet[0] = lhs.template loadPacket<PacketC>(j + 0, i); //[a1 a1i]
+ cblock.packet[1] = lhs.template loadPacket<PacketC>(j + 1, i); //[a2 a2i]
+
+ cblock.packet[2] = lhs.template loadPacket<PacketC>(j + 0, i + 1); //[b1 b1i]
+          cblock.packet[3] = lhs.template loadPacket<PacketC>(j + 1, i + 1); //[b2 b2i]
+
+ blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); //[a1 a2]
+ blockr.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETREAL64); //[b1 b2]
+
+ blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64);
+ blocki.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETIMAG64);
+ }
+
+ if(Conjugate)
+ {
+ blocki.packet[0] = -blocki.packet[0];
+ blocki.packet[1] = -blocki.packet[1];
+ }
+
+ storeBlock<double, Packet, Index, 2>(blockAt + rir, blockr);
+ storeBlock<double, Packet, Index, 2>(blockAt + rii, blocki);
+
+ rir += 2*vectorSize;
+ rii += 2*vectorSize;
+ }
+ for(; i < depth; i++)
+ {
+ PacketBlock<Packet,1> blockr, blocki;
+ PacketBlock<PacketC,2> cblock;
+
+ cblock.packet[0] = lhs.template loadPacket<PacketC>(j + 0, i);
+ cblock.packet[1] = lhs.template loadPacket<PacketC>(j + 1, i);
+
+ blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64);
+ blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64);
+
+ if(Conjugate)
+ {
+ blocki.packet[0] = -blocki.packet[0];
+ }
+
+ pstore<double>(blockAt + rir, blockr.packet[0]);
+ pstore<double>(blockAt + rii, blocki.packet[0]);
+
+ rir += vectorSize;
+ rii += vectorSize;
+ }
+
+ rir += ((PanelMode) ? (vectorSize*(2*stride - depth)) : vectorDelta);
+ }
+
+ if (j < rows)
+ {
+ if(PanelMode) rir += (offset*(rows - j - vectorSize));
+ rii = rir + (((PanelMode) ? stride : depth) * (rows - j));
+
+ for(Index i = 0; i < depth; i++)
+ {
+ Index k = j;
+ for(; k < rows; k++)
+ {
+ blockAt[rir] = lhs(k, i).real();
+
+ if(Conjugate)
+ blockAt[rii] = -lhs(k, i).imag();
+ else
+ blockAt[rii] = lhs(k, i).imag();
+
+ rir += 1;
+ rii += 1;
+ }
+ }
+ }
+ }
+};
+
+// Rhs complex packing, float64 specialization.
+template<typename Index, typename DataMapper, typename Packet, typename PacketC, int StorageOrder, bool Conjugate, bool PanelMode>
+struct dhs_cpack<double, Index, DataMapper, Packet, PacketC, StorageOrder, Conjugate, PanelMode, false>
+{
+ EIGEN_STRONG_INLINE void operator()(std::complex<double>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+ {
+ const Index vectorSize = quad_traits<double>::vectorsize;
+ const Index vectorDelta = 2*vectorSize * ((PanelMode) ? stride : depth);
+ Index rir = ((PanelMode) ? (2*vectorSize*offset) : 0), rii;
+ double* blockBt = reinterpret_cast<double *>(blockB);
+ Index j = 0;
+
+ for(; j + 2*vectorSize <= cols; j+=2*vectorSize)
+ {
+ Index i = 0;
+
+ rii = rir + vectorDelta;
+
+ for(; i < depth; i++)
+ {
+ PacketBlock<PacketC,4> cblock;
+ PacketBlock<Packet,2> blockr, blocki;
+
+ bload<DataMapper, PacketC, Index, 2, ColMajor, false, 4>(cblock, rhs, i, j);
+
+ blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64);
+ blockr.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETREAL64);
+
+ blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64);
+ blocki.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETIMAG64);
+
+ if(Conjugate)
+ {
+ blocki.packet[0] = -blocki.packet[0];
+ blocki.packet[1] = -blocki.packet[1];
+ }
+
+ storeBlock<double, Packet, Index, 2>(blockBt + rir, blockr);
+ storeBlock<double, Packet, Index, 2>(blockBt + rii, blocki);
+
+ rir += 2*vectorSize;
+ rii += 2*vectorSize;
+ }
+
+ rir += ((PanelMode) ? (2*vectorSize*(2*stride - depth)) : vectorDelta);
+ }
+
+ if(PanelMode) rir -= (offset*(2*vectorSize - 1));
+
+ for(; j < cols; j++)
+ {
+ rii = rir + ((PanelMode) ? stride : depth);
+
+ for(Index i = 0; i < depth; i++)
+ {
+ blockBt[rir] = rhs(i, j).real();
+
+ if(Conjugate)
+ blockBt[rii] = -rhs(i, j).imag();
+ else
+ blockBt[rii] = rhs(i, j).imag();
+
+ rir += 1;
+ rii += 1;
+ }
+
+ rir += ((PanelMode) ? (2*stride - depth) : depth);
+ }
+ }
+};
+
+/**************
+ * GEMM utils *
+ **************/
+
+// 512-bit rank-1 update of acc. It can accumulate either positively or negatively (useful for complex gemm).
+template<typename Packet, bool NegativeAccumulate, int N>
+EIGEN_ALWAYS_INLINE void pger_common(PacketBlock<Packet,N>* acc, const Packet& lhsV, const Packet* rhsV)
+{
+ if(NegativeAccumulate)
+ {
+ acc->packet[0] = vec_nmsub(lhsV, rhsV[0], acc->packet[0]);
+ if (N > 1) {
+ acc->packet[1] = vec_nmsub(lhsV, rhsV[1], acc->packet[1]);
+ }
+ if (N > 2) {
+ acc->packet[2] = vec_nmsub(lhsV, rhsV[2], acc->packet[2]);
+ }
+ if (N > 3) {
+ acc->packet[3] = vec_nmsub(lhsV, rhsV[3], acc->packet[3]);
+ }
+ } else {
+ acc->packet[0] = vec_madd(lhsV, rhsV[0], acc->packet[0]);
+ if (N > 1) {
+ acc->packet[1] = vec_madd(lhsV, rhsV[1], acc->packet[1]);
+ }
+ if (N > 2) {
+ acc->packet[2] = vec_madd(lhsV, rhsV[2], acc->packet[2]);
+ }
+ if (N > 3) {
+ acc->packet[3] = vec_madd(lhsV, rhsV[3], acc->packet[3]);
+ }
+ }
+}
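+
+// A scalar sketch of pger_common (illustrative only): as used by the kernels
+// below, each rhsV[j] holds a single rhs value broadcast across all lanes, so
+// per lane l this is a rank-1 update restricted to N columns:
+//   for (int j = 0; j < N; j++)
+//     for (int l = 0; l < lanes; l++)
+//       acc[j][l] += lhsV[l] * rhsV[j][l];   // or -= when NegativeAccumulate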
+
+template<int N, typename Scalar, typename Packet, bool NegativeAccumulate>
+EIGEN_ALWAYS_INLINE void pger(PacketBlock<Packet,N>* acc, const Scalar* lhs, const Packet* rhsV)
+{
+ Packet lhsV = pload<Packet>(lhs);
+
+ pger_common<Packet, NegativeAccumulate, N>(acc, lhsV, rhsV);
+}
+
+template<typename Scalar, typename Packet, typename Index, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void loadPacketRemaining(const Scalar* lhs, Packet &lhsV)
+{
+#ifdef _ARCH_PWR9
+ lhsV = vec_xl_len((Scalar *)lhs, remaining_rows * sizeof(Scalar));
+#else
+ Index i = 0;
+ do {
+ lhsV[i] = lhs[i];
+ } while (++i < remaining_rows);
+#endif
+}
+
+template<int N, typename Scalar, typename Packet, typename Index, bool NegativeAccumulate, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void pger(PacketBlock<Packet,N>* acc, const Scalar* lhs, const Packet* rhsV)
+{
+ Packet lhsV;
+ loadPacketRemaining<Scalar, Packet, Index, remaining_rows>(lhs, lhsV);
+
+ pger_common<Packet, NegativeAccumulate, N>(acc, lhsV, rhsV);
+}
+
+// 512-bit rank-1 update of a complex acc. It takes decoupled real/imaginary accumulators as inputs and also takes care of the mixed types real * complex and complex * real.
+template<int N, typename Packet, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void pgerc_common(PacketBlock<Packet,N>* accReal, PacketBlock<Packet,N>* accImag, const Packet &lhsV, const Packet &lhsVi, const Packet* rhsV, const Packet* rhsVi)
+{
+ pger_common<Packet, false, N>(accReal, lhsV, rhsV);
+ if(LhsIsReal)
+ {
+ pger_common<Packet, ConjugateRhs, N>(accImag, lhsV, rhsVi);
+ EIGEN_UNUSED_VARIABLE(lhsVi);
+ } else {
+ if (!RhsIsReal) {
+ pger_common<Packet, ConjugateLhs == ConjugateRhs, N>(accReal, lhsVi, rhsVi);
+ pger_common<Packet, ConjugateRhs, N>(accImag, lhsV, rhsVi);
+ } else {
+ EIGEN_UNUSED_VARIABLE(rhsVi);
+ }
+ pger_common<Packet, ConjugateLhs, N>(accImag, lhsVi, rhsV);
+ }
+}
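+
+// The sign choices in pgerc_common follow from expanding the complex product
+// (lr + i*li) * (rr + i*ri) = (lr*rr - li*ri) + i*(lr*ri + li*rr), with
+// conjugation flipping the sign of li and/or ri:
+//   accReal += lr*rr                                   (always)
+//   accReal -= li*ri   when ConjugateLhs == ConjugateRhs, += otherwise
+//   accImag += lr*ri   negated when ConjugateRhs
+//   accImag += li*rr   negated when ConjugateLhs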
+
+template<int N, typename Scalar, typename Packet, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void pgerc(PacketBlock<Packet,N>* accReal, PacketBlock<Packet,N>* accImag, const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, const Packet* rhsV, const Packet* rhsVi)
+{
+ Packet lhsV = ploadLhs<Scalar, Packet>(lhs_ptr);
+ Packet lhsVi;
+ if(!LhsIsReal) lhsVi = ploadLhs<Scalar, Packet>(lhs_ptr_imag);
+ else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag);
+
+ pgerc_common<N, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(accReal, accImag, lhsV, lhsVi, rhsV, rhsVi);
+}
+
+template<typename Scalar, typename Packet, typename Index, bool LhsIsReal, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void loadPacketRemaining(const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, Packet &lhsV, Packet &lhsVi)
+{
+#ifdef _ARCH_PWR9
+ lhsV = vec_xl_len((Scalar *)lhs_ptr, remaining_rows * sizeof(Scalar));
+ if(!LhsIsReal) lhsVi = vec_xl_len((Scalar *)lhs_ptr_imag, remaining_rows * sizeof(Scalar));
+ else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag);
+#else
+ Index i = 0;
+ do {
+ lhsV[i] = lhs_ptr[i];
+ if(!LhsIsReal) lhsVi[i] = lhs_ptr_imag[i];
+ } while (++i < remaining_rows);
+ if(LhsIsReal) EIGEN_UNUSED_VARIABLE(lhs_ptr_imag);
+#endif
+}
+
+template<int N, typename Scalar, typename Packet, typename Index, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void pgerc(PacketBlock<Packet,N>* accReal, PacketBlock<Packet,N>* accImag, const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, const Packet* rhsV, const Packet* rhsVi)
+{
+ Packet lhsV, lhsVi;
+ loadPacketRemaining<Scalar, Packet, Index, LhsIsReal, remaining_rows>(lhs_ptr, lhs_ptr_imag, lhsV, lhsVi);
+
+ pgerc_common<N, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(accReal, accImag, lhsV, lhsVi, rhsV, rhsVi);
+}
+
+template<typename Scalar, typename Packet>
+EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs)
+{
+ return ploadu<Packet>(lhs);
+}
+
+// Zero out the accumulators of a PacketBlock.
+template<typename Scalar, typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bsetzero(PacketBlock<Packet,N>& acc)
+{
+ acc.packet[0] = pset1<Packet>((Scalar)0);
+ if (N > 1) {
+ acc.packet[1] = pset1<Packet>((Scalar)0);
+ }
+ if (N > 2) {
+ acc.packet[2] = pset1<Packet>((Scalar)0);
+ }
+ if (N > 3) {
+ acc.packet[3] = pset1<Packet>((Scalar)0);
+ }
+}
+
+// Scale the PacketBlock vectors by alpha.
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscale(PacketBlock<Packet,N>& acc, PacketBlock<Packet,N>& accZ, const Packet& pAlpha)
+{
+ acc.packet[0] = pmadd(pAlpha, accZ.packet[0], acc.packet[0]);
+ if (N > 1) {
+ acc.packet[1] = pmadd(pAlpha, accZ.packet[1], acc.packet[1]);
+ }
+ if (N > 2) {
+ acc.packet[2] = pmadd(pAlpha, accZ.packet[2], acc.packet[2]);
+ }
+ if (N > 3) {
+ acc.packet[3] = pmadd(pAlpha, accZ.packet[3], acc.packet[3]);
+ }
+}
+
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscalec_common(PacketBlock<Packet,N>& acc, PacketBlock<Packet,N>& accZ, const Packet& pAlpha)
+{
+ acc.packet[0] = pmul<Packet>(accZ.packet[0], pAlpha);
+ if (N > 1) {
+ acc.packet[1] = pmul<Packet>(accZ.packet[1], pAlpha);
+ }
+ if (N > 2) {
+ acc.packet[2] = pmul<Packet>(accZ.packet[2], pAlpha);
+ }
+ if (N > 3) {
+ acc.packet[3] = pmul<Packet>(accZ.packet[3], pAlpha);
+ }
+}
+
+// Complex version of PacketBlock scaling.
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscalec(PacketBlock<Packet,N>& aReal, PacketBlock<Packet,N>& aImag, const Packet& bReal, const Packet& bImag, PacketBlock<Packet,N>& cReal, PacketBlock<Packet,N>& cImag)
+{
+ bscalec_common<Packet, N>(cReal, aReal, bReal);
+
+ bscalec_common<Packet, N>(cImag, aImag, bReal);
+
+ pger_common<Packet, true, N>(&cReal, bImag, aImag.packet);
+
+ pger_common<Packet, false, N>(&cImag, bImag, aReal.packet);
+}
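+
+// The four steps above compute the complex scaling c = alpha * a on decoupled
+// real/imaginary blocks: cReal = aReal*bReal - aImag*bImag and
+// cImag = aImag*bReal + aReal*bImag, where (bReal, bImag) is the broadcast alpha.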
+
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void band(PacketBlock<Packet,N>& acc, const Packet& pMask)
+{
+ acc.packet[0] = pand(acc.packet[0], pMask);
+ if (N > 1) {
+ acc.packet[1] = pand(acc.packet[1], pMask);
+ }
+ if (N > 2) {
+ acc.packet[2] = pand(acc.packet[2], pMask);
+ }
+ if (N > 3) {
+ acc.packet[3] = pand(acc.packet[3], pMask);
+ }
+}
+
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscalec(PacketBlock<Packet,N>& aReal, PacketBlock<Packet,N>& aImag, const Packet& bReal, const Packet& bImag, PacketBlock<Packet,N>& cReal, PacketBlock<Packet,N>& cImag, const Packet& pMask)
+{
+ band<Packet, N>(aReal, pMask);
+ band<Packet, N>(aImag, pMask);
+
+ bscalec<Packet,N>(aReal, aImag, bReal, bImag, cReal, cImag);
+}
+
+// Load a PacketBlock; the N parameter makes tuning gemm easier, so we can add more accumulators as needed.
+template<typename DataMapper, typename Packet, typename Index, const Index accCols, int StorageOrder, bool Complex, int N>
+EIGEN_ALWAYS_INLINE void bload(PacketBlock<Packet,N*(Complex?2:1)>& acc, const DataMapper& res, Index row, Index col)
+{
+ if (StorageOrder == RowMajor) {
+ acc.packet[0] = res.template loadPacket<Packet>(row + 0, col);
+ if (N > 1) {
+ acc.packet[1] = res.template loadPacket<Packet>(row + 1, col);
+ }
+ if (N > 2) {
+ acc.packet[2] = res.template loadPacket<Packet>(row + 2, col);
+ }
+ if (N > 3) {
+ acc.packet[3] = res.template loadPacket<Packet>(row + 3, col);
+ }
+ if (Complex) {
+ acc.packet[0+N] = res.template loadPacket<Packet>(row + 0, col + accCols);
+ if (N > 1) {
+ acc.packet[1+N] = res.template loadPacket<Packet>(row + 1, col + accCols);
+ }
+ if (N > 2) {
+ acc.packet[2+N] = res.template loadPacket<Packet>(row + 2, col + accCols);
+ }
+ if (N > 3) {
+ acc.packet[3+N] = res.template loadPacket<Packet>(row + 3, col + accCols);
+ }
+ }
+ } else {
+ acc.packet[0] = res.template loadPacket<Packet>(row, col + 0);
+ if (N > 1) {
+ acc.packet[1] = res.template loadPacket<Packet>(row, col + 1);
+ }
+ if (N > 2) {
+ acc.packet[2] = res.template loadPacket<Packet>(row, col + 2);
+ }
+ if (N > 3) {
+ acc.packet[3] = res.template loadPacket<Packet>(row, col + 3);
+ }
+ if (Complex) {
+ acc.packet[0+N] = res.template loadPacket<Packet>(row + accCols, col + 0);
+ if (N > 1) {
+ acc.packet[1+N] = res.template loadPacket<Packet>(row + accCols, col + 1);
+ }
+ if (N > 2) {
+ acc.packet[2+N] = res.template loadPacket<Packet>(row + accCols, col + 2);
+ }
+ if (N > 3) {
+ acc.packet[3+N] = res.template loadPacket<Packet>(row + accCols, col + 3);
+ }
+ }
+ }
+}
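+
+// Note: when Complex is set the PacketBlock holds twice as many packets; the
+// second half is loaded at a fixed accCols displacement along the packet
+// direction (columns for RowMajor, rows for ColMajor).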
+
+const static Packet4i mask41 = { -1, 0, 0, 0 };
+const static Packet4i mask42 = { -1, -1, 0, 0 };
+const static Packet4i mask43 = { -1, -1, -1, 0 };
+
+const static Packet2l mask21 = { -1, 0 };
+
+template<typename Packet>
+EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows)
+{
+ if (remaining_rows == 0) {
+ return pset1<Packet>(float(0.0)); // Not used
+ } else {
+ switch (remaining_rows) {
+ case 1: return Packet(mask41);
+ case 2: return Packet(mask42);
+ default: return Packet(mask43);
+ }
+ }
+}
+
+template<>
+EIGEN_ALWAYS_INLINE Packet2d bmask<Packet2d>(const int remaining_rows)
+{
+ if (remaining_rows == 0) {
+ return pset1<Packet2d>(double(0.0)); // Not used
+ } else {
+ return Packet2d(mask21);
+ }
+}
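+
+// Example (illustrative): bmask<Packet4f>(2) yields { -1, -1, 0, 0 }; since -1
+// is an all-ones lane, band() keeps the first two lanes of an accumulator and
+// zeroes the rest, which is how the remaining-rows paths mask partial rows.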
+
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscale(PacketBlock<Packet,N>& acc, PacketBlock<Packet,N>& accZ, const Packet& pAlpha, const Packet& pMask)
+{
+ band<Packet, N>(accZ, pMask);
+
+ bscale<Packet, N>(acc, accZ, pAlpha);
+}
+
+template<typename Packet, int N> EIGEN_ALWAYS_INLINE void
+pbroadcastN_old(const __UNPACK_TYPE__(Packet) *a,
+ Packet& a0, Packet& a1, Packet& a2, Packet& a3)
+{
+ a0 = pset1<Packet>(a[0]);
+ if (N > 1) {
+ a1 = pset1<Packet>(a[1]);
+ } else {
+ EIGEN_UNUSED_VARIABLE(a1);
+ }
+ if (N > 2) {
+ a2 = pset1<Packet>(a[2]);
+ } else {
+ EIGEN_UNUSED_VARIABLE(a2);
+ }
+ if (N > 3) {
+ a3 = pset1<Packet>(a[3]);
+ } else {
+ EIGEN_UNUSED_VARIABLE(a3);
+ }
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void pbroadcastN_old<Packet4f,4>(const float* a, Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ pbroadcast4<Packet4f>(a, a0, a1, a2, a3);
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void pbroadcastN_old<Packet2d,4>(const double* a, Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
+{
+ a1 = pload<Packet2d>(a);
+ a3 = pload<Packet2d>(a + 2);
+ a0 = vec_splat(a1, 0);
+ a1 = vec_splat(a1, 1);
+ a2 = vec_splat(a3, 0);
+ a3 = vec_splat(a3, 1);
+}
+
+template<typename Packet, int N> EIGEN_ALWAYS_INLINE void
+pbroadcastN(const __UNPACK_TYPE__(Packet) *a,
+ Packet& a0, Packet& a1, Packet& a2, Packet& a3)
+{
+ a0 = pset1<Packet>(a[0]);
+ if (N > 1) {
+ a1 = pset1<Packet>(a[1]);
+ } else {
+ EIGEN_UNUSED_VARIABLE(a1);
+ }
+ if (N > 2) {
+ a2 = pset1<Packet>(a[2]);
+ } else {
+ EIGEN_UNUSED_VARIABLE(a2);
+ }
+ if (N > 3) {
+ a3 = pset1<Packet>(a[3]);
+ } else {
+ EIGEN_UNUSED_VARIABLE(a3);
+ }
+}
+
+template<> EIGEN_ALWAYS_INLINE void
+pbroadcastN<Packet4f,4>(const float *a,
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ a3 = pload<Packet4f>(a);
+ a0 = vec_splat(a3, 0);
+ a1 = vec_splat(a3, 1);
+ a2 = vec_splat(a3, 2);
+ a3 = vec_splat(a3, 3);
+}
+
+// PEEL loop factor.
+#define PEEL 7
+#define PEEL_ROW 7
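+
+// PEEL/PEEL_ROW control how many rank-1 updates are unrolled per iteration of
+// the main k loops; the macros below emit slots 0..7 and guard each slot with
+// the factor, so unused slots compile away.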
+
+#define MICRO_UNROLL_PEEL(func) \
+ func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7)
+
+#define MICRO_ZERO_PEEL(peel) \
+ if ((PEEL_ROW > peel) && (peel != 0)) { \
+ bsetzero<Scalar, Packet, accRows>(accZero##peel); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accZero##peel); \
+ }
+
+#define MICRO_ZERO_PEEL_ROW \
+ MICRO_UNROLL_PEEL(MICRO_ZERO_PEEL);
+
+#define MICRO_WORK_PEEL(peel) \
+ if (PEEL_ROW > peel) { \
+ pbroadcastN<Packet,accRows>(rhs_ptr + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \
+ pger<accRows, Scalar, Packet, false>(&accZero##peel, lhs_ptr + (remaining_rows * peel), rhsV##peel); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ }
+
+#define MICRO_WORK_PEEL_ROW \
+ Packet rhsV0[4], rhsV1[4], rhsV2[4], rhsV3[4], rhsV4[4], rhsV5[4], rhsV6[4], rhsV7[4]; \
+ MICRO_UNROLL_PEEL(MICRO_WORK_PEEL); \
+ lhs_ptr += (remaining_rows * PEEL_ROW); \
+ rhs_ptr += (accRows * PEEL_ROW);
+
+#define MICRO_ADD_PEEL(peel, sum) \
+ if (PEEL_ROW > peel) { \
+ for (Index i = 0; i < accRows; i++) { \
+ accZero##sum.packet[i] += accZero##peel.packet[i]; \
+ } \
+ }
+
+#define MICRO_ADD_PEEL_ROW \
+ MICRO_ADD_PEEL(4, 0) MICRO_ADD_PEEL(5, 1) MICRO_ADD_PEEL(6, 2) MICRO_ADD_PEEL(7, 3) \
+ MICRO_ADD_PEEL(2, 0) MICRO_ADD_PEEL(3, 1) MICRO_ADD_PEEL(1, 0)
+
+template<typename Scalar, typename Packet, typename Index, const Index accRows, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void MICRO_EXTRA_ROW(
+ const Scalar* &lhs_ptr,
+ const Scalar* &rhs_ptr,
+ PacketBlock<Packet,accRows> &accZero)
+{
+ Packet rhsV[4];
+ pbroadcastN<Packet,accRows>(rhs_ptr, rhsV[0], rhsV[1], rhsV[2], rhsV[3]);
+ pger<accRows, Scalar, Packet, false>(&accZero, lhs_ptr, rhsV);
+ lhs_ptr += remaining_rows;
+ rhs_ptr += accRows;
+}
+
+template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void gemm_unrolled_row_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ const Packet& pAlpha,
+ const Packet& pMask)
+{
+ const Scalar* rhs_ptr = rhs_base;
+ const Scalar* lhs_ptr = lhs_base + row*strideA + remaining_rows*offsetA;
+ PacketBlock<Packet,accRows> accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7, acc;
+
+ bsetzero<Scalar, Packet, accRows>(accZero0);
+
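+  // For the last column block, round depth down to a multiple of
+  // quad_traits<Scalar>::rows (a power of two, so depth & -rows is that
+  // rounding); any leftover depth is handled by the scalar tail below.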
+ Index remaining_depth = (col + quad_traits<Scalar>::rows < cols) ? depth : (depth & -quad_traits<Scalar>::rows);
+ Index k = 0;
+ if (remaining_depth >= PEEL_ROW) {
+ MICRO_ZERO_PEEL_ROW
+ do
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr);
+ EIGEN_POWER_PREFETCH(lhs_ptr);
+ MICRO_WORK_PEEL_ROW
+ } while ((k += PEEL_ROW) + PEEL_ROW <= remaining_depth);
+ MICRO_ADD_PEEL_ROW
+ }
+ for(; k < remaining_depth; k++)
+ {
+ MICRO_EXTRA_ROW<Scalar, Packet, Index, accRows, remaining_rows>(lhs_ptr, rhs_ptr, accZero0);
+ }
+
+ if ((remaining_depth == depth) && (rows >= accCols))
+ {
+ bload<DataMapper, Packet, Index, 0, ColMajor, false, accRows>(acc, res, row, 0);
+ bscale<Packet,accRows>(acc, accZero0, pAlpha, pMask);
+ res.template storePacketBlock<Packet,accRows>(row, 0, acc);
+ } else {
+ for(; k < depth; k++)
+ {
+ Packet rhsV[4];
+ pbroadcastN<Packet,accRows>(rhs_ptr, rhsV[0], rhsV[1], rhsV[2], rhsV[3]);
+ pger<accRows, Scalar, Packet, Index, false, remaining_rows>(&accZero0, lhs_ptr, rhsV);
+ lhs_ptr += remaining_rows;
+ rhs_ptr += accRows;
+ }
+
+ for(Index j = 0; j < accRows; j++) {
+ accZero0.packet[j] = vec_mul(pAlpha, accZero0.packet[j]);
+ for(Index i = 0; i < remaining_rows; i++) {
+ res(row + i, j) += accZero0.packet[j][i];
+ }
+ }
+ }
+}
+
+template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+EIGEN_ALWAYS_INLINE void gemm_extra_row(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask)
+{
+ switch(remaining_rows) {
+ case 1:
+ gemm_unrolled_row_iteration<Scalar, Packet, DataMapper, Index, accRows, accCols, 1>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, pAlpha, pMask);
+ break;
+ case 2:
+ if (sizeof(Scalar) == sizeof(float)) {
+ gemm_unrolled_row_iteration<Scalar, Packet, DataMapper, Index, accRows, accCols, 2>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, pAlpha, pMask);
+ }
+ break;
+ default:
+ if (sizeof(Scalar) == sizeof(float)) {
+ gemm_unrolled_row_iteration<Scalar, Packet, DataMapper, Index, accRows, accCols, 3>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, pAlpha, pMask);
+ }
+ break;
+ }
+}
+
+#define MICRO_UNROLL(func) \
+ func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7)
+
+#define MICRO_UNROLL_WORK(func, func2, peel) \
+ MICRO_UNROLL(func2); \
+ func(0,peel) func(1,peel) func(2,peel) func(3,peel) \
+ func(4,peel) func(5,peel) func(6,peel) func(7,peel)
+
+#define MICRO_LOAD_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhsV##iter = ploadLhs<Scalar, Packet>(lhs_ptr##iter); \
+ lhs_ptr##iter += accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsV##iter); \
+ }
+
+#define MICRO_WORK_ONE(iter, peel) \
+ if (unroll_factor > iter) { \
+ pger_common<Packet, false, accRows>(&accZero##iter, lhsV##iter, rhsV##peel); \
+ }
+
+#define MICRO_TYPE_PEEL4(func, func2, peel) \
+ if (PEEL > peel) { \
+ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \
+ pbroadcastN<Packet,accRows>(rhs_ptr + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \
+ MICRO_UNROLL_WORK(func, func2, peel) \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ }
+
+#define MICRO_UNROLL_TYPE_PEEL(M, func, func1, func2) \
+ Packet rhsV0[M], rhsV1[M], rhsV2[M], rhsV3[M], rhsV4[M], rhsV5[M], rhsV6[M], rhsV7[M]; \
+ func(func1,func2,0); func(func1,func2,1); \
+ func(func1,func2,2); func(func1,func2,3); \
+ func(func1,func2,4); func(func1,func2,5); \
+ func(func1,func2,6); func(func1,func2,7);
+
+#define MICRO_UNROLL_TYPE_ONE(M, func, func1, func2) \
+ Packet rhsV0[M]; \
+ func(func1,func2,0);
+
+#define MICRO_ONE_PEEL4 \
+ MICRO_UNROLL_TYPE_PEEL(4, MICRO_TYPE_PEEL4, MICRO_WORK_ONE, MICRO_LOAD_ONE); \
+ rhs_ptr += (accRows * PEEL);
+
+#define MICRO_ONE4 \
+ MICRO_UNROLL_TYPE_ONE(4, MICRO_TYPE_PEEL4, MICRO_WORK_ONE, MICRO_LOAD_ONE); \
+ rhs_ptr += accRows;
+
+#define MICRO_DST_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bsetzero<Scalar, Packet, accRows>(accZero##iter); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accZero##iter); \
+ }
+
+#define MICRO_DST_PTR MICRO_UNROLL(MICRO_DST_PTR_ONE)
+
+#define MICRO_SRC_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhs_ptr##iter = lhs_base + ( (row/accCols) + iter )*strideA*accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhs_ptr##iter); \
+ }
+
+#define MICRO_SRC_PTR MICRO_UNROLL(MICRO_SRC_PTR_ONE)
+
+#define MICRO_PREFETCH_ONE(iter) \
+ if (unroll_factor > iter) { \
+ EIGEN_POWER_PREFETCH(lhs_ptr##iter); \
+ }
+
+#define MICRO_PREFETCH MICRO_UNROLL(MICRO_PREFETCH_ONE)
+
+#define MICRO_STORE_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bload<DataMapper, Packet, Index, 0, ColMajor, false, accRows>(acc, res, row + iter*accCols, 0); \
+ bscale<Packet,accRows>(acc, accZero##iter, pAlpha); \
+ res.template storePacketBlock<Packet,accRows>(row + iter*accCols, 0, acc); \
+ }
+
+#define MICRO_STORE MICRO_UNROLL(MICRO_STORE_ONE)
+
+template<int unroll_factor, typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+EIGEN_STRONG_INLINE void gemm_unrolled_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index& row,
+ const Packet& pAlpha)
+{
+ const Scalar* rhs_ptr = rhs_base;
+ const Scalar* lhs_ptr0 = NULL, * lhs_ptr1 = NULL, * lhs_ptr2 = NULL, * lhs_ptr3 = NULL, * lhs_ptr4 = NULL, * lhs_ptr5 = NULL, * lhs_ptr6 = NULL, * lhs_ptr7 = NULL;
+ PacketBlock<Packet,accRows> accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7;
+ PacketBlock<Packet,accRows> acc;
+
+ MICRO_SRC_PTR
+ MICRO_DST_PTR
+
+ Index k = 0;
+ for(; k + PEEL <= depth; k+= PEEL)
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr);
+ MICRO_PREFETCH
+ MICRO_ONE_PEEL4
+ }
+ for(; k < depth; k++)
+ {
+ MICRO_ONE4
+ }
+ MICRO_STORE
+
+ row += unroll_factor*accCols;
+}
+
+template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+EIGEN_ALWAYS_INLINE void gemm_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask)
+{
+ const DataMapper res3 = res.getSubMapper(0, col);
+
+ const Scalar* rhs_base = blockB + col*strideB + accRows*offsetB;
+ const Scalar* lhs_base = blockA + accCols*offsetA;
+ Index row = 0;
+
+#define MAX_UNROLL 6
+ while(row + MAX_UNROLL*accCols <= rows) {
+ gemm_unrolled_iteration<MAX_UNROLL, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ }
+ switch( (rows-row)/accCols ) {
+#if MAX_UNROLL > 7
+ case 7:
+ gemm_unrolled_iteration<7, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_UNROLL > 6
+ case 6:
+ gemm_unrolled_iteration<6, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_UNROLL > 5
+ case 5:
+ gemm_unrolled_iteration<5, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_UNROLL > 4
+ case 4:
+ gemm_unrolled_iteration<4, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_UNROLL > 3
+ case 3:
+ gemm_unrolled_iteration<3, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_UNROLL > 2
+ case 2:
+ gemm_unrolled_iteration<2, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_UNROLL > 1
+ case 1:
+ gemm_unrolled_iteration<1, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+ default:
+ break;
+ }
+#undef MAX_UNROLL
+
+ if(remaining_rows > 0)
+ {
+ gemm_extra_row<Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, blockA, rhs_base, depth, strideA, offsetA, row, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+}
+
+template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accCols>
+EIGEN_STRONG_INLINE void gemm_extra_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask)
+{
+ for (; col < cols; col++) {
+ gemm_cols<Scalar, Packet, DataMapper, Index, 1, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+}
+
+/****************
+ * GEMM kernels *
+ ****************/
+template<typename Scalar, typename Index, typename Packet, typename RhsPacket, typename DataMapper, const Index accRows, const Index accCols>
+EIGEN_STRONG_INLINE void gemm(const DataMapper& res, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols, Scalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB)
+{
+ const Index remaining_rows = rows % accCols;
+
+ if( strideA == -1 ) strideA = depth;
+ if( strideB == -1 ) strideB = depth;
+
+ const Packet pAlpha = pset1<Packet>(alpha);
+ const Packet pMask = bmask<Packet>((const int)(remaining_rows));
+
+ Index col = 0;
+ for(; col + accRows <= cols; col += accRows)
+ {
+ gemm_cols<Scalar, Packet, DataMapper, Index, accRows, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+
+ gemm_extra_cols<Scalar, Packet, DataMapper, Index, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+}
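+
+// Tiling summary: gemm walks the result in accRows-wide column strips; within
+// each strip gemm_cols consumes rows in panels of accCols (unrolling up to
+// MAX_UNROLL panels per iteration), the remaining (rows % accCols) rows go
+// through gemm_extra_row, and trailing columns through gemm_extra_cols one
+// column at a time.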
+
+#define accColsC (accCols / 2)
+#define advanceRows ((LhsIsReal) ? 1 : 2)
+#define advanceCols ((RhsIsReal) ? 1 : 2)
+
+// PEEL_COMPLEX loop factor.
+#define PEEL_COMPLEX 3
+#define PEEL_COMPLEX_ROW 3
+
+#define MICRO_COMPLEX_UNROLL_PEEL(func) \
+ func(0) func(1) func(2) func(3)
+
+#define MICRO_COMPLEX_ZERO_PEEL(peel) \
+ if ((PEEL_COMPLEX_ROW > peel) && (peel != 0)) { \
+ bsetzero<Scalar, Packet, accRows>(accReal##peel); \
+ bsetzero<Scalar, Packet, accRows>(accImag##peel); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accReal##peel); \
+ EIGEN_UNUSED_VARIABLE(accImag##peel); \
+ }
+
+#define MICRO_COMPLEX_ZERO_PEEL_ROW \
+ MICRO_COMPLEX_UNROLL_PEEL(MICRO_COMPLEX_ZERO_PEEL);
+
+#define MICRO_COMPLEX_WORK_PEEL(peel) \
+ if (PEEL_COMPLEX_ROW > peel) { \
+ pbroadcastN_old<Packet,accRows>(rhs_ptr_real + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \
+ if(!RhsIsReal) pbroadcastN_old<Packet,accRows>(rhs_ptr_imag + (accRows * peel), rhsVi##peel[0], rhsVi##peel[1], rhsVi##peel[2], rhsVi##peel[3]); \
+ pgerc<accRows, Scalar, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal##peel, &accImag##peel, lhs_ptr_real + (remaining_rows * peel), lhs_ptr_imag + (remaining_rows * peel), rhsV##peel, rhsVi##peel); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ }
+
+#define MICRO_COMPLEX_WORK_PEEL_ROW \
+ Packet rhsV0[4], rhsV1[4], rhsV2[4], rhsV3[4]; \
+ Packet rhsVi0[4], rhsVi1[4], rhsVi2[4], rhsVi3[4]; \
+ MICRO_COMPLEX_UNROLL_PEEL(MICRO_COMPLEX_WORK_PEEL); \
+ lhs_ptr_real += (remaining_rows * PEEL_COMPLEX_ROW); \
+ if(!LhsIsReal) lhs_ptr_imag += (remaining_rows * PEEL_COMPLEX_ROW); \
+ else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); \
+ rhs_ptr_real += (accRows * PEEL_COMPLEX_ROW); \
+ if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX_ROW); \
+ else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag);
+
+#define MICRO_COMPLEX_ADD_PEEL(peel, sum) \
+ if (PEEL_COMPLEX_ROW > peel) { \
+ for (Index i = 0; i < accRows; i++) { \
+ accReal##sum.packet[i] += accReal##peel.packet[i]; \
+ accImag##sum.packet[i] += accImag##peel.packet[i]; \
+ } \
+ }
+
+#define MICRO_COMPLEX_ADD_PEEL_ROW \
+ MICRO_COMPLEX_ADD_PEEL(2, 0) MICRO_COMPLEX_ADD_PEEL(3, 1) \
+ MICRO_COMPLEX_ADD_PEEL(1, 0)
+
+template<typename Scalar, typename Packet, typename Index, const Index accRows, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void MICRO_COMPLEX_EXTRA_ROW(
+ const Scalar* &lhs_ptr_real, const Scalar* &lhs_ptr_imag,
+ const Scalar* &rhs_ptr_real, const Scalar* &rhs_ptr_imag,
+ PacketBlock<Packet,accRows> &accReal, PacketBlock<Packet,accRows> &accImag)
+{
+ Packet rhsV[4], rhsVi[4];
+ pbroadcastN_old<Packet,accRows>(rhs_ptr_real, rhsV[0], rhsV[1], rhsV[2], rhsV[3]);
+ if(!RhsIsReal) pbroadcastN_old<Packet,accRows>(rhs_ptr_imag, rhsVi[0], rhsVi[1], rhsVi[2], rhsVi[3]);
+ pgerc<accRows, Scalar, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal, &accImag, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi);
+ lhs_ptr_real += remaining_rows;
+ if(!LhsIsReal) lhs_ptr_imag += remaining_rows;
+ else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag);
+ rhs_ptr_real += accRows;
+ if(!RhsIsReal) rhs_ptr_imag += accRows;
+ else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag);
+}
+
+template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal, const Index remaining_rows>
+EIGEN_ALWAYS_INLINE void gemm_unrolled_complex_row_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask)
+{
+ const Scalar* rhs_ptr_real = rhs_base;
+ const Scalar* rhs_ptr_imag = NULL;
+ if(!RhsIsReal) rhs_ptr_imag = rhs_base + accRows*strideB;
+ else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag);
+ const Scalar* lhs_ptr_real = lhs_base + advanceRows*row*strideA + remaining_rows*offsetA;
+ const Scalar* lhs_ptr_imag = NULL;
+ if(!LhsIsReal) lhs_ptr_imag = lhs_ptr_real + remaining_rows*strideA;
+ else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag);
+ PacketBlock<Packet,accRows> accReal0, accImag0, accReal1, accImag1, accReal2, accImag2, accReal3, accImag3;
+ PacketBlock<Packet,accRows> taccReal, taccImag;
+ PacketBlock<Packetc,accRows> acc0, acc1;
+ PacketBlock<Packetc,accRows*2> tRes;
+
+ bsetzero<Scalar, Packet, accRows>(accReal0);
+ bsetzero<Scalar, Packet, accRows>(accImag0);
+
+ Index remaining_depth = (col + quad_traits<Scalar>::rows < cols) ? depth : (depth & -quad_traits<Scalar>::rows);
+ Index k = 0;
+ if (remaining_depth >= PEEL_COMPLEX_ROW) {
+ MICRO_COMPLEX_ZERO_PEEL_ROW
+ do
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr_real);
+ if(!RhsIsReal) {
+ EIGEN_POWER_PREFETCH(rhs_ptr_imag);
+ }
+ EIGEN_POWER_PREFETCH(lhs_ptr_real);
+ if(!LhsIsReal) {
+ EIGEN_POWER_PREFETCH(lhs_ptr_imag);
+ }
+ MICRO_COMPLEX_WORK_PEEL_ROW
+ } while ((k += PEEL_COMPLEX_ROW) + PEEL_COMPLEX_ROW <= remaining_depth);
+ MICRO_COMPLEX_ADD_PEEL_ROW
+ }
+ for(; k < remaining_depth; k++)
+ {
+ MICRO_COMPLEX_EXTRA_ROW<Scalar, Packet, Index, accRows, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal, remaining_rows>(lhs_ptr_real, lhs_ptr_imag, rhs_ptr_real, rhs_ptr_imag, accReal0, accImag0);
+ }
+
+ if ((remaining_depth == depth) && (rows >= accCols))
+ {
+ bload<DataMapper, Packetc, Index, accColsC, ColMajor, true, accRows>(tRes, res, row, 0);
+ bscalec<Packet,accRows>(accReal0, accImag0, pAlphaReal, pAlphaImag, taccReal, taccImag, pMask);
+ bcouple<Packet, Packetc, accRows>(taccReal, taccImag, tRes, acc0, acc1);
+ res.template storePacketBlock<Packetc,accRows>(row + 0, 0, acc0);
+ res.template storePacketBlock<Packetc,accRows>(row + accColsC, 0, acc1);
+ } else {
+ for(; k < depth; k++)
+ {
+ Packet rhsV[4], rhsVi[4];
+ pbroadcastN_old<Packet,accRows>(rhs_ptr_real, rhsV[0], rhsV[1], rhsV[2], rhsV[3]);
+ if(!RhsIsReal) pbroadcastN_old<Packet,accRows>(rhs_ptr_imag, rhsVi[0], rhsVi[1], rhsVi[2], rhsVi[3]);
+ pgerc<accRows, Scalar, Packet, Index, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal, remaining_rows>(&accReal0, &accImag0, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi);
+ lhs_ptr_real += remaining_rows;
+ if(!LhsIsReal) lhs_ptr_imag += remaining_rows;
+ rhs_ptr_real += accRows;
+ if(!RhsIsReal) rhs_ptr_imag += accRows;
+ }
+
+ bscalec<Packet,accRows>(accReal0, accImag0, pAlphaReal, pAlphaImag, taccReal, taccImag);
+ bcouple_common<Packet, Packetc, accRows>(taccReal, taccImag, acc0, acc1);
+
+ if ((sizeof(Scalar) == sizeof(float)) && (remaining_rows == 1))
+ {
+ for(Index j = 0; j < accRows; j++) {
+ res(row + 0, j) += pfirst<Packetc>(acc0.packet[j]);
+ }
+ } else {
+ for(Index j = 0; j < accRows; j++) {
+ PacketBlock<Packetc,1> acc2;
+ acc2.packet[0] = res.template loadPacket<Packetc>(row + 0, j) + acc0.packet[j];
+ res.template storePacketBlock<Packetc,1>(row + 0, j, acc2);
+ if(remaining_rows > accColsC) {
+ res(row + accColsC, j) += pfirst<Packetc>(acc1.packet[j]);
+ }
+ }
+ }
+ }
+}
+
+template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void gemm_complex_extra_row(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask)
+{
+ switch(remaining_rows) {
+ case 1:
+ gemm_unrolled_complex_row_iteration<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal, 1>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, pAlphaReal, pAlphaImag, pMask);
+ break;
+ case 2:
+ if (sizeof(Scalar) == sizeof(float)) {
+ gemm_unrolled_complex_row_iteration<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal, 2>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, pAlphaReal, pAlphaImag, pMask);
+ }
+ break;
+ default:
+ if (sizeof(Scalar) == sizeof(float)) {
+ gemm_unrolled_complex_row_iteration<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal, 3>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, pAlphaReal, pAlphaImag, pMask);
+ }
+ break;
+ }
+}
+
+#define MICRO_COMPLEX_UNROLL(func) \
+ func(0) func(1) func(2) func(3)
+
+#define MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \
+ MICRO_COMPLEX_UNROLL(func2); \
+ func(0,peel) func(1,peel) func(2,peel) func(3,peel)
+
+#define MICRO_COMPLEX_LOAD_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhsV##iter = ploadLhs<Scalar, Packet>(lhs_ptr_real##iter); \
+ if(!LhsIsReal) { \
+ lhsVi##iter = ploadLhs<Scalar, Packet>(lhs_ptr_real##iter + imag_delta); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \
+ } \
+ lhs_ptr_real##iter += accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsV##iter); \
+ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \
+ }
+
+#define MICRO_COMPLEX_WORK_ONE4(iter, peel) \
+ if (unroll_factor > iter) { \
+ pgerc_common<accRows, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \
+ }
+
+#define MICRO_COMPLEX_TYPE_PEEL4(func, func2, peel) \
+ if (PEEL_COMPLEX > peel) { \
+ Packet lhsV0, lhsV1, lhsV2, lhsV3; \
+ Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3; \
+ pbroadcastN_old<Packet,accRows>(rhs_ptr_real + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \
+ if(!RhsIsReal) { \
+ pbroadcastN_old<Packet,accRows>(rhs_ptr_imag + (accRows * peel), rhsVi##peel[0], rhsVi##peel[1], rhsVi##peel[2], rhsVi##peel[3]); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ } \
+ MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ }
+
+#define MICRO_COMPLEX_UNROLL_TYPE_PEEL(M, func, func1, func2) \
+ Packet rhsV0[M], rhsV1[M], rhsV2[M], rhsV3[M]; \
+ Packet rhsVi0[M], rhsVi1[M], rhsVi2[M], rhsVi3[M]; \
+ func(func1,func2,0); func(func1,func2,1); \
+ func(func1,func2,2); func(func1,func2,3);
+
+#define MICRO_COMPLEX_UNROLL_TYPE_ONE(M, func, func1, func2) \
+ Packet rhsV0[M], rhsVi0[M];\
+ func(func1,func2,0);
+
+#define MICRO_COMPLEX_ONE_PEEL4 \
+ MICRO_COMPLEX_UNROLL_TYPE_PEEL(4, MICRO_COMPLEX_TYPE_PEEL4, MICRO_COMPLEX_WORK_ONE4, MICRO_COMPLEX_LOAD_ONE); \
+ rhs_ptr_real += (accRows * PEEL_COMPLEX); \
+ if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX);
+
+#define MICRO_COMPLEX_ONE4 \
+ MICRO_COMPLEX_UNROLL_TYPE_ONE(4, MICRO_COMPLEX_TYPE_PEEL4, MICRO_COMPLEX_WORK_ONE4, MICRO_COMPLEX_LOAD_ONE); \
+ rhs_ptr_real += accRows; \
+ if(!RhsIsReal) rhs_ptr_imag += accRows;
+
+#define MICRO_COMPLEX_DST_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bsetzero<Scalar, Packet, accRows>(accReal##iter); \
+ bsetzero<Scalar, Packet, accRows>(accImag##iter); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accReal##iter); \
+ EIGEN_UNUSED_VARIABLE(accImag##iter); \
+ }
+
+#define MICRO_COMPLEX_DST_PTR MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_DST_PTR_ONE)
+
+#define MICRO_COMPLEX_SRC_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhs_ptr_real##iter = lhs_base + ( ((advanceRows*row)/accCols) + iter*advanceRows )*strideA*accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhs_ptr_real##iter); \
+ }
+
+#define MICRO_COMPLEX_SRC_PTR MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_SRC_PTR_ONE)
+
+#define MICRO_COMPLEX_PREFETCH_ONE(iter) \
+ if (unroll_factor > iter) { \
+ EIGEN_POWER_PREFETCH(lhs_ptr_real##iter); \
+ }
+
+#define MICRO_COMPLEX_PREFETCH MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_PREFETCH_ONE)
+
+#define MICRO_COMPLEX_STORE_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bload<DataMapper, Packetc, Index, accColsC, ColMajor, true, accRows>(tRes, res, row + iter*accCols, 0); \
+ bscalec<Packet,accRows>(accReal##iter, accImag##iter, pAlphaReal, pAlphaImag, taccReal, taccImag); \
+ bcouple<Packet, Packetc, accRows>(taccReal, taccImag, tRes, acc0, acc1); \
+ res.template storePacketBlock<Packetc,accRows>(row + iter*accCols + 0, 0, acc0); \
+ res.template storePacketBlock<Packetc,accRows>(row + iter*accCols + accColsC, 0, acc1); \
+ }
+
+#define MICRO_COMPLEX_STORE MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_STORE_ONE)
+
+template<int unroll_factor, typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_STRONG_INLINE void gemm_complex_unrolled_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index strideB,
+ Index& row,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag)
+{
+ const Scalar* rhs_ptr_real = rhs_base;
+ const Scalar* rhs_ptr_imag = NULL;
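+  // The packed complex LHS stores each row block as accCols*strideA real
+  // scalars followed by the matching imaginary scalars, so an element's
+  // imaginary part sits imag_delta scalars after its real part.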
+ const Index imag_delta = accCols*strideA;
+ if(!RhsIsReal) {
+ rhs_ptr_imag = rhs_base + accRows*strideB;
+ } else {
+ EIGEN_UNUSED_VARIABLE(rhs_ptr_imag);
+ }
+ const Scalar* lhs_ptr_real0 = NULL, * lhs_ptr_real1 = NULL;
+ const Scalar* lhs_ptr_real2 = NULL, * lhs_ptr_real3 = NULL;
+ PacketBlock<Packet,accRows> accReal0, accImag0, accReal1, accImag1;
+ PacketBlock<Packet,accRows> accReal2, accImag2, accReal3, accImag3;
+ PacketBlock<Packet,accRows> taccReal, taccImag;
+ PacketBlock<Packetc,accRows> acc0, acc1;
+ PacketBlock<Packetc,accRows*2> tRes;
+
+ MICRO_COMPLEX_SRC_PTR
+ MICRO_COMPLEX_DST_PTR
+
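+  // Walk the depth dimension PEEL_COMPLEX steps at a time, then finish the
+  // remainder one step at a time.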
+ Index k = 0;
+ for(; k + PEEL_COMPLEX <= depth; k+= PEEL_COMPLEX)
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr_real);
+ if(!RhsIsReal) {
+ EIGEN_POWER_PREFETCH(rhs_ptr_imag);
+ }
+ MICRO_COMPLEX_PREFETCH
+ MICRO_COMPLEX_ONE_PEEL4
+ }
+ for(; k < depth; k++)
+ {
+ MICRO_COMPLEX_ONE4
+ }
+ MICRO_COMPLEX_STORE
+
+ row += unroll_factor*accCols;
+}
+
+template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void gemm_complex_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask)
+{
+ const DataMapper res3 = res.getSubMapper(0, col);
+
+ const Scalar* rhs_base = blockB + advanceCols*col*strideB + accRows*offsetB;
+ const Scalar* lhs_base = blockA + accCols*offsetA;
+ Index row = 0;
+
+#define MAX_COMPLEX_UNROLL 3
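+  // gemm_complex_unrolled_iteration advances row (taken by reference) by
+  // MAX_COMPLEX_UNROLL*accCols per pass; the switch below routes the remaining
+  // whole blocks to a narrower unrolled kernel.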
+ while(row + MAX_COMPLEX_UNROLL*accCols <= rows) {
+ gemm_complex_unrolled_iteration<MAX_COMPLEX_UNROLL, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ }
+ switch( (rows-row)/accCols ) {
+#if MAX_COMPLEX_UNROLL > 4
+ case 4:
+ gemm_complex_unrolled_iteration<4, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+#if MAX_COMPLEX_UNROLL > 3
+ case 3:
+ gemm_complex_unrolled_iteration<3, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+#if MAX_COMPLEX_UNROLL > 2
+ case 2:
+ gemm_complex_unrolled_iteration<2, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+#if MAX_COMPLEX_UNROLL > 1
+ case 1:
+ gemm_complex_unrolled_iteration<1, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+ default:
+ break;
+ }
+#undef MAX_COMPLEX_UNROLL
+
+ if(remaining_rows > 0)
+ {
+ gemm_complex_extra_row<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, blockA, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+}
+
+template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_STRONG_INLINE void gemm_complex_extra_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask)
+{
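+  // Leftover columns take the same path as full panels, just one column wide.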
+ for (; col < cols; col++) {
+ gemm_complex_cols<Scalar, Packet, Packetc, DataMapper, Index, 1, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+}
+
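+// Top-level complex GEMM driver: sweep full panels of accRows columns, then
+// hand any leftover columns to gemm_complex_extra_cols.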
+template<typename LhsScalar, typename RhsScalar, typename Scalarc, typename Scalar, typename Index, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_STRONG_INLINE void gemm_complex(const DataMapper& res, const LhsScalar* blockAc, const RhsScalar* blockBc, Index rows, Index depth, Index cols, Scalarc alpha, Index strideA, Index strideB, Index offsetA, Index offsetB)
+{
+ const Index remaining_rows = rows % accCols;
+
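+  // A stride of -1 is the callers' sentinel for the default packing stride,
+  // i.e. the full depth.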
+ if( strideA == -1 ) strideA = depth;
+ if( strideB == -1 ) strideB = depth;
+
+ const Packet pAlphaReal = pset1<Packet>(alpha.real());
+ const Packet pAlphaImag = pset1<Packet>(alpha.imag());
+ const Packet pMask = bmask<Packet>((const int)(remaining_rows));
+
+ const Scalar* blockA = (Scalar *) blockAc;
+ const Scalar* blockB = (Scalar *) blockBc;
+
+ Index col = 0;
+ for(; col + accRows <= cols; col += accRows)
+ {
+ gemm_complex_cols<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+
+ gemm_complex_extra_cols<Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+}
+
+#undef accColsC
+#undef advanceCols
+#undef advanceRows
+
+/************************************
+ * ppc64le template specializations *
+ ***********************************/
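+// These specializations route Eigen's generic gemm_pack_lhs/gemm_pack_rhs
+// entry points to the AltiVec packers: dhs_pack for real data and dhs_cpack
+// for complex data, for each scalar type and storage order.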
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<double, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<double, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+ ::operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_pack<double, Index, DataMapper, Packet2d, ColMajor, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<double, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<double, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+ ::operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_pack<double, Index, DataMapper, Packet2d, RowMajor, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+#if EIGEN_ALTIVEC_USE_CUSTOM_PACK
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<double, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<double, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+ ::operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_pack<double, Index, DataMapper, Packet2d, ColMajor, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<double, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<double, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+ ::operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_pack<double, Index, DataMapper, Packet2d, RowMajor, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+#endif
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<float, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<float, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+ ::operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_pack<float, Index, DataMapper, Packet4f, RowMajor, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<float, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<float, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+ ::operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_pack<float, Index, DataMapper, Packet4f, ColMajor, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<std::complex<float>, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<float>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<std::complex<float>, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<float>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_cpack<float, Index, DataMapper, Packet4f, Packet2cf, RowMajor, Conjugate, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<std::complex<float>, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<float>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<std::complex<float>, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<float>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_cpack<float, Index, DataMapper, Packet4f, Packet2cf, ColMajor, Conjugate, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+#if EIGEN_ALTIVEC_USE_CUSTOM_PACK
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<float, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<float, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+ ::operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_pack<float, Index, DataMapper, Packet4f, ColMajor, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<float, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<float, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+ ::operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_pack<float, Index, DataMapper, Packet4f, RowMajor, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+#endif
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<std::complex<float>, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<float>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<std::complex<float>, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<float>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_cpack<float, Index, DataMapper, Packet4f, Packet2cf, ColMajor, Conjugate, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<std::complex<float>, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<float>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<std::complex<float>, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<float>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_cpack<float, Index, DataMapper, Packet4f, Packet2cf, RowMajor, Conjugate, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<std::complex<double>, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<double>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<std::complex<double>, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<double>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_cpack<double, Index, DataMapper, Packet2d, Packet1cd, RowMajor, Conjugate, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<std::complex<double>, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<double>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+void gemm_pack_lhs<std::complex<double>, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<double>* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
+{
+ dhs_cpack<double, Index, DataMapper, Packet2d, Packet1cd, ColMajor, Conjugate, PanelMode, true> pack;
+ pack(blockA, lhs, depth, rows, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<std::complex<double>, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<double>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<std::complex<double>, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<double>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_cpack<double, Index, DataMapper, Packet2d, Packet1cd, ColMajor, Conjugate, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<std::complex<double>, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+{
+ void operator()(std::complex<double>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
+void gemm_pack_rhs<std::complex<double>, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
+ ::operator()(std::complex<double>* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
+{
+ dhs_cpack<double, Index, DataMapper, Packet2d, Packet1cd, RowMajor, Conjugate, PanelMode, false> pack;
+ pack(blockB, rhs, depth, cols, stride, offset);
+}
+
+// ********* gebp specializations *********
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<float, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef typename quad_traits<float>::vectortype Packet;
+ typedef typename quad_traits<float>::rhstype RhsPacket;
+
+ void operator()(const DataMapper& res, const float* blockA, const float* blockB,
+ Index rows, Index depth, Index cols, float alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<float, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const float* blockA, const float* blockB,
+ Index rows, Index depth, Index cols, float alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<float>::rows;
+ const Index accCols = quad_traits<float>::size;
+ void (*gemm_function)(const DataMapper&, const float*, const float*, Index, Index, Index, float, Index, Index, Index, Index);
+
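+  // Kernel selection: MMA-only builds always take gemmMMA; otherwise, when
+  // MMA support was compiled in, probe the CPU at run time (ISA 3.1 plus MMA,
+  // i.e. Power10) and fall back to the VSX gemm path on older hardware.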
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemmMMA<float, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemmMMA<float, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm<float, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm<float, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<std::complex<float>, std::complex<float>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef Packet4f Packet;
+ typedef Packet2cf Packetc;
+ typedef Packet4f RhsPacket;
+
+ void operator()(const DataMapper& res, const std::complex<float>* blockA, const std::complex<float>* blockB,
+ Index rows, Index depth, Index cols, std::complex<float> alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<std::complex<float>, std::complex<float>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const std::complex<float>* blockA, const std::complex<float>* blockB,
+ Index rows, Index depth, Index cols, std::complex<float> alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<float>::rows;
+ const Index accCols = quad_traits<float>::size;
+ void (*gemm_function)(const DataMapper&, const std::complex<float>*, const std::complex<float>*,
+ Index, Index, Index, std::complex<float>, Index, Index, Index, Index);
+
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<float>, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<float>, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<float>, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<float>, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<float, std::complex<float>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef Packet4f Packet;
+ typedef Packet2cf Packetc;
+ typedef Packet4f RhsPacket;
+
+ void operator()(const DataMapper& res, const float* blockA, const std::complex<float>* blockB,
+ Index rows, Index depth, Index cols, std::complex<float> alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<float, std::complex<float>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const float* blockA, const std::complex<float>* blockB,
+ Index rows, Index depth, Index cols, std::complex<float> alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<float>::rows;
+ const Index accCols = quad_traits<float>::size;
+ void (*gemm_function)(const DataMapper&, const float*, const std::complex<float>*,
+ Index, Index, Index, std::complex<float>, Index, Index, Index, Index);
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemm_complexMMA<float, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemm_complexMMA<float, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm_complex<float, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm_complex<float, std::complex<float>, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<std::complex<float>, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef Packet4f Packet;
+ typedef Packet2cf Packetc;
+ typedef Packet4f RhsPacket;
+
+ void operator()(const DataMapper& res, const std::complex<float>* blockA, const float* blockB,
+ Index rows, Index depth, Index cols, std::complex<float> alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<std::complex<float>, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const std::complex<float>* blockA, const float* blockB,
+ Index rows, Index depth, Index cols, std::complex<float> alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<float>::rows;
+ const Index accCols = quad_traits<float>::size;
+ void (*gemm_function)(const DataMapper&, const std::complex<float>*, const float*,
+ Index, Index, Index, std::complex<float>, Index, Index, Index, Index);
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<float>, float, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<float>, float, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<float>, float, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<float>, float, std::complex<float>, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<double, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef typename quad_traits<double>::vectortype Packet;
+ typedef typename quad_traits<double>::rhstype RhsPacket;
+
+ void operator()(const DataMapper& res, const double* blockA, const double* blockB,
+ Index rows, Index depth, Index cols, double alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<double, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const double* blockA, const double* blockB,
+ Index rows, Index depth, Index cols, double alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<double>::rows;
+ const Index accCols = quad_traits<double>::size;
+ void (*gemm_function)(const DataMapper&, const double*, const double*, Index, Index, Index, double, Index, Index, Index, Index);
+
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemmMMA<double, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemmMMA<double, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm<double, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm<double, Index, Packet, RhsPacket, DataMapper, accRows, accCols>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<std::complex<double>, std::complex<double>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef quad_traits<double>::vectortype Packet;
+ typedef Packet1cd Packetc;
+ typedef quad_traits<double>::rhstype RhsPacket;
+
+ void operator()(const DataMapper& res, const std::complex<double>* blockA, const std::complex<double>* blockB,
+ Index rows, Index depth, Index cols, std::complex<double> alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<std::complex<double>, std::complex<double>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const std::complex<double>* blockA, const std::complex<double>* blockB,
+ Index rows, Index depth, Index cols, std::complex<double> alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<double>::rows;
+ const Index accCols = quad_traits<double>::size;
+ void (*gemm_function)(const DataMapper&, const std::complex<double>*, const std::complex<double>*,
+ Index, Index, Index, std::complex<double>, Index, Index, Index, Index);
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<double>, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<double>, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<double>, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<double>, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<std::complex<double>, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef quad_traits<double>::vectortype Packet;
+ typedef Packet1cd Packetc;
+ typedef quad_traits<double>::rhstype RhsPacket;
+
+ void operator()(const DataMapper& res, const std::complex<double>* blockA, const double* blockB,
+ Index rows, Index depth, Index cols, std::complex<double> alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<std::complex<double>, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const std::complex<double>* blockA, const double* blockB,
+ Index rows, Index depth, Index cols, std::complex<double> alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<double>::rows;
+ const Index accCols = quad_traits<double>::size;
+ void (*gemm_function)(const DataMapper&, const std::complex<double>*, const double*,
+ Index, Index, Index, std::complex<double>, Index, Index, Index, Index);
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<double>, double, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemm_complexMMA<std::complex<double>, double, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<double>, double, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm_complex<std::complex<double>, double, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel<double, std::complex<double>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+{
+ typedef quad_traits<double>::vectortype Packet;
+ typedef Packet1cd Packetc;
+ typedef quad_traits<double>::rhstype RhsPacket;
+
+ void operator()(const DataMapper& res, const double* blockA, const std::complex<double>* blockB,
+ Index rows, Index depth, Index cols, std::complex<double> alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
+};
+
+template<typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+void gebp_kernel<double, std::complex<double>, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs>
+ ::operator()(const DataMapper& res, const double* blockA, const std::complex<double>* blockB,
+ Index rows, Index depth, Index cols, std::complex<double> alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index accRows = quad_traits<double>::rows;
+ const Index accCols = quad_traits<double>::size;
+ void (*gemm_function)(const DataMapper&, const double*, const std::complex<double>*,
+ Index, Index, Index, std::complex<double>, Index, Index, Index, Index);
+ #ifdef EIGEN_ALTIVEC_MMA_ONLY
+  // Generate code with MMA only.
+ gemm_function = &Eigen::internal::gemm_complexMMA<double, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
+ if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){
+ gemm_function = &Eigen::internal::gemm_complexMMA<double, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ }
+ else{
+ gemm_function = &Eigen::internal::gemm_complex<double, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ }
+ #else
+ gemm_function = &Eigen::internal::gemm_complex<double, std::complex<double>, std::complex<double>, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>;
+ #endif
+ gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB);
+ }
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_PRODUCT_ALTIVEC_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h
new file mode 100644
index 000000000..bf01dba1c
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h
@@ -0,0 +1,159 @@
+//#define EIGEN_POWER_USE_PREFETCH // Use prefetching in gemm routines
+#ifdef EIGEN_POWER_USE_PREFETCH
+#define EIGEN_POWER_PREFETCH(p) prefetch(p)
+#else
+#define EIGEN_POWER_PREFETCH(p)
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+EIGEN_ALWAYS_INLINE void gemm_extra_row(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask);
+
+template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accCols>
+EIGEN_STRONG_INLINE void gemm_extra_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask);
+
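+// bmask builds a per-lane selector whose first remaining_rows lanes are set;
+// the extra-row kernels use it to mask off lanes beyond the live rows.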
+template<typename Packet>
+EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows);
+
+template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void gemm_complex_extra_row(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask);
+
+template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_STRONG_INLINE void gemm_complex_extra_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask);
+
+template<typename Scalar, typename Packet>
+EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs);
+
+template<typename DataMapper, typename Packet, typename Index, const Index accCols, int StorageOrder, bool Complex, int N>
+EIGEN_ALWAYS_INLINE void bload(PacketBlock<Packet,N*(Complex?2:1)>& acc, const DataMapper& res, Index row, Index col);
+
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscale(PacketBlock<Packet,N>& acc, PacketBlock<Packet,N>& accZ, const Packet& pAlpha);
+
+template<typename Packet, int N>
+EIGEN_ALWAYS_INLINE void bscalec(PacketBlock<Packet,N>& aReal, PacketBlock<Packet,N>& aImag, const Packet& bReal, const Packet& bImag, PacketBlock<Packet,N>& cReal, PacketBlock<Packet,N>& cImag);
+
+// Take two decoupled real/imaginary PacketBlocks and return two coupled (real/imaginary pairs) PacketBlocks.
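+// For Packet4f, for example, taccReal = [r0 r1 r2 r3] and taccImag =
+// [i0 i1 i2 i3] interleave into acc1 = [r0 i0 r1 i1] (vec_mergeh) and
+// acc2 = [r2 i2 r3 i3] (vec_mergel).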
+template<typename Packet, typename Packetc, int N>
+EIGEN_ALWAYS_INLINE void bcouple_common(PacketBlock<Packet,N>& taccReal, PacketBlock<Packet,N>& taccImag, PacketBlock<Packetc, N>& acc1, PacketBlock<Packetc, N>& acc2)
+{
+ acc1.packet[0].v = vec_mergeh(taccReal.packet[0], taccImag.packet[0]);
+ if (N > 1) {
+ acc1.packet[1].v = vec_mergeh(taccReal.packet[1], taccImag.packet[1]);
+ }
+ if (N > 2) {
+ acc1.packet[2].v = vec_mergeh(taccReal.packet[2], taccImag.packet[2]);
+ }
+ if (N > 3) {
+ acc1.packet[3].v = vec_mergeh(taccReal.packet[3], taccImag.packet[3]);
+ }
+
+ acc2.packet[0].v = vec_mergel(taccReal.packet[0], taccImag.packet[0]);
+ if (N > 1) {
+ acc2.packet[1].v = vec_mergel(taccReal.packet[1], taccImag.packet[1]);
+ }
+ if (N > 2) {
+ acc2.packet[2].v = vec_mergel(taccReal.packet[2], taccImag.packet[2]);
+ }
+ if (N > 3) {
+ acc2.packet[3].v = vec_mergel(taccReal.packet[3], taccImag.packet[3]);
+ }
+}
+
+template<typename Packet, typename Packetc, int N>
+EIGEN_ALWAYS_INLINE void bcouple(PacketBlock<Packet,N>& taccReal, PacketBlock<Packet,N>& taccImag, PacketBlock<Packetc,N*2>& tRes, PacketBlock<Packetc, N>& acc1, PacketBlock<Packetc, N>& acc2)
+{
+ bcouple_common<Packet, Packetc, N>(taccReal, taccImag, acc1, acc2);
+
+ acc1.packet[0] = padd<Packetc>(tRes.packet[0], acc1.packet[0]);
+ if (N > 1) {
+ acc1.packet[1] = padd<Packetc>(tRes.packet[1], acc1.packet[1]);
+ }
+ if (N > 2) {
+ acc1.packet[2] = padd<Packetc>(tRes.packet[2], acc1.packet[2]);
+ }
+ if (N > 3) {
+ acc1.packet[3] = padd<Packetc>(tRes.packet[3], acc1.packet[3]);
+ }
+
+ acc2.packet[0] = padd<Packetc>(tRes.packet[0+N], acc2.packet[0]);
+ if (N > 1) {
+ acc2.packet[1] = padd<Packetc>(tRes.packet[1+N], acc2.packet[1]);
+ }
+ if (N > 2) {
+ acc2.packet[2] = padd<Packetc>(tRes.packet[2+N], acc2.packet[2]);
+ }
+ if (N > 3) {
+ acc2.packet[3] = padd<Packetc>(tRes.packet[3+N], acc2.packet[3]);
+ }
+}
+
+// Kept as a named helper so the MMA code can build on it: ploadRhs for double returns a pair of vectors when MMA is enabled.
+template<typename Scalar, typename Packet>
+EIGEN_ALWAYS_INLINE Packet ploadRhs(const Scalar* rhs)
+{
+ return ploadu<Packet>(rhs);
+}
+
+} // end namespace internal
+} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h
new file mode 100644
index 000000000..5b4449537
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h
@@ -0,0 +1,620 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020 Everton Constantino (everton.constantino@ibm.com)
+// Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H
+#define EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H
+
+#pragma GCC target("cpu=power10,htm")
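+// The MMA builtins require Power10 code generation; the callers in
+// MatrixProduct.h only select these kernels after their compile-time or
+// runtime MMA checks.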
+
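+// Some compilers only provide the older __builtin_mma_assemble_pair
+// spelling; alias the VSX name to it when the builtin is missing.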
+#ifdef __has_builtin
+#if !__has_builtin(__builtin_vsx_assemble_pair)
+#define __builtin_vsx_assemble_pair __builtin_mma_assemble_pair
+#endif
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
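+// __vector_quad is the POWER10 MMA accumulator: 512 bits holding four
+// 128-bit rows. xxsetaccz clears one in a single instruction.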
+template<typename Scalar, typename Packet>
+EIGEN_ALWAYS_INLINE void bsetzeroMMA(__vector_quad* acc)
+{
+ __builtin_mma_xxsetaccz(acc);
+}
+
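+// Drain one finished accumulator: unpack its four vector rows, then update
+// the destination tile with alpha times the accumulated product.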
+template<typename DataMapper, typename Index, typename Packet, const Index accCols>
+EIGEN_ALWAYS_INLINE void storeAccumulator(Index i, const DataMapper& data, const Packet& alpha, __vector_quad* acc)
+{
+ PacketBlock<Packet, 4> result;
+ __builtin_mma_disassemble_acc(&result.packet, acc);
+
+ PacketBlock<Packet, 4> tRes;
+ bload<DataMapper, Packet, Index, accCols, ColMajor, false, 4>(tRes, data, i, 0);
+
+ bscale<Packet, 4>(tRes, result, alpha);
+
+ data.template storePacketBlock<Packet, 4>(i, 0, tRes);
+}
+
+template<typename DataMapper, typename Index, typename Packet, typename Packetc, const Index accColsC>
+EIGEN_ALWAYS_INLINE void storeComplexAccumulator(Index i, const DataMapper& data, const Packet& alphaReal, const Packet& alphaImag, __vector_quad* accReal, __vector_quad* accImag)
+{
+ PacketBlock<Packet, 4> resultReal, resultImag;
+ __builtin_mma_disassemble_acc(&resultReal.packet, accReal);
+ __builtin_mma_disassemble_acc(&resultImag.packet, accImag);
+
+ PacketBlock<Packetc, 8> tRes;
+ bload<DataMapper, Packetc, Index, accColsC, ColMajor, true, 4>(tRes, data, i, 0);
+
+ PacketBlock<Packet,4> taccReal, taccImag;
+ bscalec<Packet,4>(resultReal, resultImag, alphaReal, alphaImag, taccReal, taccImag);
+
+ PacketBlock<Packetc, 4> acc1, acc2;
+ bcouple<Packet, Packetc, 4>(taccReal, taccImag, tRes, acc1, acc2);
+
+ data.template storePacketBlock<Packetc, 4>(i, 0, acc1);
+ data.template storePacketBlock<Packetc, 4>(i + accColsC, 0, acc2);
+}
+
+// Defaults to float32; since Eigen still supports C++03 we can't use default template arguments.
+template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const RhsPacket& a, const LhsPacket& b)
+{
+ if(NegativeAccumulate)
+ {
+ __builtin_mma_xvf32gernp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
+ } else {
+ __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
+ }
+}
+
+template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const PacketBlock<Packet2d,2>& a, const Packet2d& b)
+{
+ __vector_pair* a0 = (__vector_pair *)(&a.packet[0]);
+ if(NegativeAccumulate)
+ {
+ __builtin_mma_xvf64gernp(acc, *a0, (__vector unsigned char)b);
+ } else {
+ __builtin_mma_xvf64gerpp(acc, *a0, (__vector unsigned char)b);
+ }
+}
+
+template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const __vector_pair& a, const Packet2d& b)
+{
+ if(NegativeAccumulate)
+ {
+ __builtin_mma_xvf64gernp(acc, (__vector_pair)a, (__vector unsigned char)b);
+ } else {
+ __builtin_mma_xvf64gerpp(acc, (__vector_pair)a, (__vector unsigned char)b);
+ }
+}
+
+template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad*, const __vector_pair&, const Packet4f&)
+{
+  // Never executed; only needed so the untaken branch of the sizeof(Scalar) dispatch compiles.
+}
+
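+// Complex rank-1 update assembled from real ger operations: accReal
+// accumulates lr*rr and -/+ li*ri, accImag accumulates +/- lr*ri and
+// +/- li*rr, with each sign chosen by the conjugation flags; real-only
+// operands skip the terms they do not need.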
+template<typename Scalar, typename Packet, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void pgercMMA(__vector_quad* accReal, __vector_quad* accImag, const Packet& lhsV, const Packet& lhsVi, const RhsPacket& rhsV, const RhsPacket& rhsVi)
+{
+ pgerMMA<Packet, RhsPacket, false>(accReal, rhsV, lhsV);
+ if(LhsIsReal) {
+ pgerMMA<Packet, RhsPacket, ConjugateRhs>(accImag, rhsVi, lhsV);
+ } else {
+ if(!RhsIsReal) {
+ pgerMMA<Packet, RhsPacket, ConjugateLhs == ConjugateRhs>(accReal, rhsVi, lhsVi);
+ pgerMMA<Packet, RhsPacket, ConjugateRhs>(accImag, rhsVi, lhsV);
+ } else {
+ EIGEN_UNUSED_VARIABLE(rhsVi);
+ }
+ pgerMMA<Packet, RhsPacket, ConjugateLhs>(accImag, rhsV, lhsVi);
+ }
+}
+
+// This is necessary because ploadRhs for double returns a pair of vectors when MMA is enabled.
+template<typename Scalar, typename Packet>
+EIGEN_ALWAYS_INLINE void ploadRhsMMA(const Scalar* rhs, Packet& rhsV)
+{
+ rhsV = ploadRhs<Scalar, Packet>(rhs);
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void ploadRhsMMA<double, PacketBlock<Packet2d, 2> >(const double* rhs, PacketBlock<Packet2d, 2>& rhsV)
+{
+ rhsV.packet[0] = ploadRhs<double, Packet2d>((const double *)((Packet2d *)rhs ));
+ rhsV.packet[1] = ploadRhs<double, Packet2d>((const double *)(((Packet2d *)rhs) + 1));
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void ploadRhsMMA<double, __vector_pair>(const double* rhs, __vector_pair& rhsV)
+{
+#if EIGEN_COMP_LLVM
+ __builtin_vsx_assemble_pair(&rhsV,
+ (__vector unsigned char)(ploadRhs<double, Packet2d>((const double *)(((Packet2d *)rhs) + 1))),
+ (__vector unsigned char)(ploadRhs<double, Packet2d>((const double *)((Packet2d *)rhs ))));
+#else
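+  // lxvp loads 32 bytes straight into an even/odd VSX register pair on
+  // Power10.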
+ __asm__ ("lxvp %x0,%1" : "=wa" (rhsV) : "Y" (*rhs));
+#endif
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void ploadRhsMMA(const float*, __vector_pair&)
+{
+  // Never executed; only needed so the untaken branch of the sizeof(Scalar) dispatch compiles.
+}
+
+// Depth-loop peeling factor: the MMA micro kernel processes PEEL_MMA depth steps per main-loop pass.
+#define PEEL_MMA 7
+
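+// The MICRO_MMA_* macros mirror the hand-unrolling scheme of the non-MMA
+// kernels, but cover up to eight row blocks, each owning one __vector_quad
+// accumulator.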
+#define MICRO_MMA_UNROLL(func) \
+ func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7)
+
+#define MICRO_MMA_LOAD_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhsV##iter = ploadLhs<Scalar, Packet>(lhs_ptr##iter); \
+ lhs_ptr##iter += accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsV##iter); \
+ }
+
+#define MICRO_MMA_WORK_ONE(iter, type, peel) \
+ if (unroll_factor > iter) { \
+ pgerMMA<Packet, type, false>(&accZero##iter, rhsV##peel, lhsV##iter); \
+ }
+
+#define MICRO_MMA_TYPE_PEEL(func, func2, type, peel) \
+ if (PEEL_MMA > peel) { \
+ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \
+ ploadRhsMMA<Scalar, type>(rhs_ptr + (accRows * peel), rhsV##peel); \
+ MICRO_MMA_UNROLL(func2); \
+ func(0,type,peel) func(1,type,peel) func(2,type,peel) func(3,type,peel) \
+ func(4,type,peel) func(5,type,peel) func(6,type,peel) func(7,type,peel) \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ }
+
+#define MICRO_MMA_UNROLL_TYPE_PEEL(func, func2, type) \
+ type rhsV0, rhsV1, rhsV2, rhsV3, rhsV4, rhsV5, rhsV6, rhsV7; \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,0); MICRO_MMA_TYPE_PEEL(func,func2,type,1); \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,2); MICRO_MMA_TYPE_PEEL(func,func2,type,3); \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,4); MICRO_MMA_TYPE_PEEL(func,func2,type,5); \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,6); MICRO_MMA_TYPE_PEEL(func,func2,type,7);
+
+#define MICRO_MMA_UNROLL_TYPE_ONE(func, func2, type) \
+ type rhsV0; \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,0);
+
+#define MICRO_MMA_ONE_PEEL \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_MMA_UNROLL_TYPE_PEEL(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_MMA_UNROLL_TYPE_PEEL(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr += (accRows * PEEL_MMA);
+
+#define MICRO_MMA_ONE \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_MMA_UNROLL_TYPE_ONE(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_MMA_UNROLL_TYPE_ONE(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr += accRows;
+
+#define MICRO_MMA_DST_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bsetzeroMMA<Scalar, Packet>(&accZero##iter); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accZero##iter); \
+ }
+
+#define MICRO_MMA_DST_PTR MICRO_MMA_UNROLL(MICRO_MMA_DST_PTR_ONE)
+
+#define MICRO_MMA_SRC_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhs_ptr##iter = lhs_base + ( (row/accCols) + iter )*strideA*accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhs_ptr##iter); \
+ }
+
+#define MICRO_MMA_SRC_PTR MICRO_MMA_UNROLL(MICRO_MMA_SRC_PTR_ONE)
+
+#define MICRO_MMA_PREFETCH_ONE(iter) \
+ if (unroll_factor > iter) { \
+ EIGEN_POWER_PREFETCH(lhs_ptr##iter); \
+ }
+
+#define MICRO_MMA_PREFETCH MICRO_MMA_UNROLL(MICRO_MMA_PREFETCH_ONE)
+
+#define MICRO_MMA_STORE_ONE(iter) \
+ if (unroll_factor > iter) { \
+ storeAccumulator<DataMapper, Index, Packet, accCols>(row + iter*accCols, res, pAlpha, &accZero##iter); \
+ }
+
+#define MICRO_MMA_STORE MICRO_MMA_UNROLL(MICRO_MMA_STORE_ONE)
+
+template<int unroll_factor, typename Scalar, typename Packet, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+EIGEN_ALWAYS_INLINE void gemm_unrolled_MMA_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index& row,
+ const Packet& pAlpha)
+{
+ const Scalar* rhs_ptr = rhs_base;
+ const Scalar* lhs_ptr0 = NULL, * lhs_ptr1 = NULL, * lhs_ptr2 = NULL, * lhs_ptr3 = NULL, * lhs_ptr4 = NULL, * lhs_ptr5 = NULL, * lhs_ptr6 = NULL, * lhs_ptr7 = NULL;
+ __vector_quad accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7;
+
+ MICRO_MMA_SRC_PTR
+ MICRO_MMA_DST_PTR
+
+ Index k = 0;
+ for(; k + PEEL_MMA <= depth; k+= PEEL_MMA)
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr);
+ MICRO_MMA_PREFETCH
+ MICRO_MMA_ONE_PEEL
+ }
+ for(; k < depth; k++)
+ {
+ MICRO_MMA_ONE
+ }
+ MICRO_MMA_STORE
+
+ row += unroll_factor*accCols;
+}
+
+template<typename Scalar, typename Packet, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+EIGEN_ALWAYS_INLINE void gemmMMA_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask)
+{
+ const DataMapper res3 = res.getSubMapper(0, col);
+
+ const Scalar* rhs_base = blockB + col*strideB + accRows*offsetB;
+ const Scalar* lhs_base = blockA + accCols*offsetA;
+ Index row = 0;
+
+#define MAX_MMA_UNROLL 7
+ while(row + MAX_MMA_UNROLL*accCols <= rows) {
+ gemm_unrolled_MMA_iteration<MAX_MMA_UNROLL, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ }
+ switch( (rows-row)/accCols ) {
+#if MAX_MMA_UNROLL > 7
+ case 7:
+ gemm_unrolled_MMA_iteration<7, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_MMA_UNROLL > 6
+ case 6:
+ gemm_unrolled_MMA_iteration<6, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_MMA_UNROLL > 5
+ case 5:
+ gemm_unrolled_MMA_iteration<5, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_MMA_UNROLL > 4
+ case 4:
+ gemm_unrolled_MMA_iteration<4, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_MMA_UNROLL > 3
+ case 3:
+ gemm_unrolled_MMA_iteration<3, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_MMA_UNROLL > 2
+ case 2:
+ gemm_unrolled_MMA_iteration<2, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+#if MAX_MMA_UNROLL > 1
+ case 1:
+ gemm_unrolled_MMA_iteration<1, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+#endif
+ default:
+ break;
+ }
+#undef MAX_MMA_UNROLL
+
+ if(remaining_rows > 0)
+ {
+ gemm_extra_row<Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, blockA, rhs_base, depth, strideA, offsetA, row, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+}
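
The while loop consumes full MAX_MMA_UNROLL-wide row blocks, and the switch then maps the runtime count of leftover blocks onto a fixed template unroll factor. A minimal sketch of that runtime-to-compile-time dispatch (hypothetical names):

    // Sketch: select a compile-time unroll factor from a runtime remainder,
    // as the switch above does with gemm_unrolled_MMA_iteration<N, ...>.
    #include <cstdio>
    template <int N> void kernel() { std::printf("unroll=%d\n", N); }
    void dispatch(int remaining) {
      switch (remaining) {
        case 3: kernel<3>(); break;
        case 2: kernel<2>(); break;
        case 1: kernel<1>(); break;
        default: break;  // nothing left: fall through to the remainder path
      }
    }
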
+
+template<typename Scalar, typename Index, typename Packet, typename RhsPacket, typename DataMapper, const Index accRows, const Index accCols>
+void gemmMMA(const DataMapper& res, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols, Scalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB)
+{
+ const Index remaining_rows = rows % accCols;
+
+ if( strideA == -1 ) strideA = depth;
+ if( strideB == -1 ) strideB = depth;
+
+ const Packet pAlpha = pset1<Packet>(alpha);
+ const Packet pMask = bmask<Packet>((const int)(remaining_rows));
+
+ Index col = 0;
+ for(; col + accRows <= cols; col += accRows)
+ {
+ gemmMMA_cols<Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+
+ gemm_extra_cols<Scalar, Packet, DataMapper, Index, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+}
+
+#define accColsC (accCols / 2)
+#define advanceRows ((LhsIsReal) ? 1 : 2)
+#define advanceCols ((RhsIsReal) ? 1 : 2)
+
+// Peeling factor (PEEL_COMPLEX_MMA) for the complex MMA depth loop.
+#define PEEL_COMPLEX_MMA 3
+
+#define MICRO_COMPLEX_MMA_UNROLL(func) \
+ func(0) func(1) func(2) func(3)
+
+#define MICRO_COMPLEX_MMA_LOAD_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhsV##iter = ploadLhs<Scalar, Packet>(lhs_ptr_real##iter); \
+ if(!LhsIsReal) { \
+ lhsVi##iter = ploadLhs<Scalar, Packet>(lhs_ptr_real##iter + imag_delta); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \
+ } \
+ lhs_ptr_real##iter += accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsV##iter); \
+ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \
+ }
+
+#define MICRO_COMPLEX_MMA_WORK_ONE(iter, type, peel) \
+ if (unroll_factor > iter) { \
+ pgercMMA<Scalar, Packet, type, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \
+ }
+
+#define MICRO_COMPLEX_MMA_TYPE_PEEL(func, func2, type, peel) \
+ if (PEEL_COMPLEX_MMA > peel) { \
+ Packet lhsV0, lhsV1, lhsV2, lhsV3; \
+ Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3; \
+ ploadRhsMMA<Scalar, type>(rhs_ptr_real + (accRows * peel), rhsV##peel); \
+ if(!RhsIsReal) { \
+ ploadRhsMMA<Scalar, type>(rhs_ptr_imag + (accRows * peel), rhsVi##peel); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ } \
+ MICRO_COMPLEX_MMA_UNROLL(func2); \
+ func(0,type,peel) func(1,type,peel) func(2,type,peel) func(3,type,peel) \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ }
+
+#define MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(func, func2, type) \
+ type rhsV0, rhsV1, rhsV2, rhsV3; \
+ type rhsVi0, rhsVi1, rhsVi2, rhsVi3; \
+ MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,0); MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,1); \
+ MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,2); MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,3);
+
+#define MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(func, func2, type) \
+ type rhsV0, rhsVi0; \
+ MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,0);
+
+#define MICRO_COMPLEX_MMA_ONE_PEEL \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr_real += (accRows * PEEL_COMPLEX_MMA); \
+ if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX_MMA);
+
+#define MICRO_COMPLEX_MMA_ONE \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr_real += accRows; \
+ if(!RhsIsReal) rhs_ptr_imag += accRows;
+
+#define MICRO_COMPLEX_MMA_DST_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bsetzeroMMA<Scalar, Packet>(&accReal##iter); \
+ bsetzeroMMA<Scalar, Packet>(&accImag##iter); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accReal##iter); \
+ EIGEN_UNUSED_VARIABLE(accImag##iter); \
+ }
+
+#define MICRO_COMPLEX_MMA_DST_PTR MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_DST_PTR_ONE)
+
+#define MICRO_COMPLEX_MMA_SRC_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhs_ptr_real##iter = lhs_base + ( ((advanceRows*row)/accCols) + iter*advanceRows )*strideA*accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhs_ptr_real##iter); \
+ }
+
+#define MICRO_COMPLEX_MMA_SRC_PTR MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_SRC_PTR_ONE)
+
+#define MICRO_COMPLEX_MMA_PREFETCH_ONE(iter) \
+ if (unroll_factor > iter) { \
+ EIGEN_POWER_PREFETCH(lhs_ptr_real##iter); \
+ }
+
+#define MICRO_COMPLEX_MMA_PREFETCH MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_PREFETCH_ONE)
+
+#define MICRO_COMPLEX_MMA_STORE_ONE(iter) \
+ if (unroll_factor > iter) { \
+ storeComplexAccumulator<DataMapper, Index, Packet, Packetc, accColsC>(row + iter*accCols, res, pAlphaReal, pAlphaImag, &accReal##iter, &accImag##iter); \
+ }
+
+#define MICRO_COMPLEX_MMA_STORE MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_STORE_ONE)
+
+template<int unroll_factor, typename Scalar, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void gemm_complex_unrolled_MMA_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index strideB,
+ Index& row,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag)
+{
+ const Scalar* rhs_ptr_real = rhs_base;
+ const Scalar* rhs_ptr_imag = NULL;
+ const Index imag_delta = accCols*strideA;
+ if(!RhsIsReal) {
+ rhs_ptr_imag = rhs_base + accRows*strideB;
+ } else {
+ EIGEN_UNUSED_VARIABLE(rhs_ptr_imag);
+ }
+ const Scalar* lhs_ptr_real0 = NULL, * lhs_ptr_real1 = NULL;
+ const Scalar* lhs_ptr_real2 = NULL, * lhs_ptr_real3 = NULL;
+ __vector_quad accReal0, accImag0, accReal1, accImag1, accReal2, accImag2, accReal3, accImag3;
+
+ MICRO_COMPLEX_MMA_SRC_PTR
+ MICRO_COMPLEX_MMA_DST_PTR
+
+ Index k = 0;
+ for(; k + PEEL_COMPLEX_MMA <= depth; k+= PEEL_COMPLEX_MMA)
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr_real);
+ if(!RhsIsReal) {
+ EIGEN_POWER_PREFETCH(rhs_ptr_imag);
+ }
+ MICRO_COMPLEX_MMA_PREFETCH
+ MICRO_COMPLEX_MMA_ONE_PEEL
+ }
+ for(; k < depth; k++)
+ {
+ MICRO_COMPLEX_MMA_ONE
+ }
+ MICRO_COMPLEX_MMA_STORE
+
+ row += unroll_factor*accCols;
+}
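
The loaders above assume a split layout for the packed complex operands: within a panel, the imaginary parts sit imag_delta (= accCols*strideA) scalars after the real parts, so one pointer serves both loads. A sketch of that layout (hypothetical struct, assumed semantics):

    // Sketch of the split real/imaginary panel layout: reals occupy
    // [0, n), imaginaries [imag_delta, imag_delta + n).
    #include <cstddef>
    #include <vector>
    struct SplitComplexPanel {
      std::vector<float> data;
      std::size_t imag_delta;
      float real(std::size_t i) const { return data[i]; }
      float imag(std::size_t i) const { return data[imag_delta + i]; }
    };
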
+
+template<typename Scalar, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+EIGEN_ALWAYS_INLINE void gemmMMA_complex_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask)
+{
+ const DataMapper res3 = res.getSubMapper(0, col);
+
+ const Scalar* rhs_base = blockB + advanceCols*col*strideB + accRows*offsetB;
+ const Scalar* lhs_base = blockA + accCols*offsetA;
+ Index row = 0;
+
+#define MAX_COMPLEX_MMA_UNROLL 4
+ while(row + MAX_COMPLEX_MMA_UNROLL*accCols <= rows) {
+ gemm_complex_unrolled_MMA_iteration<MAX_COMPLEX_MMA_UNROLL, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ }
+ switch( (rows-row)/accCols ) {
+#if MAX_COMPLEX_MMA_UNROLL > 4
+ case 4:
+ gemm_complex_unrolled_MMA_iteration<4, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+#if MAX_COMPLEX_MMA_UNROLL > 3
+ case 3:
+ gemm_complex_unrolled_MMA_iteration<3, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+#if MAX_COMPLEX_MMA_UNROLL > 2
+ case 2:
+ gemm_complex_unrolled_MMA_iteration<2, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+#if MAX_COMPLEX_MMA_UNROLL > 1
+ case 1:
+ gemm_complex_unrolled_MMA_iteration<1, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+#endif
+ default:
+ break;
+ }
+#undef MAX_COMPLEX_MMA_UNROLL
+
+ if(remaining_rows > 0)
+ {
+ gemm_complex_extra_row<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, blockA, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+}
+
+template<typename LhsScalar, typename RhsScalar, typename Scalarc, typename Scalar, typename Index, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+void gemm_complexMMA(const DataMapper& res, const LhsScalar* blockAc, const RhsScalar* blockBc, Index rows, Index depth, Index cols, Scalarc alpha, Index strideA, Index strideB, Index offsetA, Index offsetB)
+{
+ const Index remaining_rows = rows % accCols;
+
+ if( strideA == -1 ) strideA = depth;
+ if( strideB == -1 ) strideB = depth;
+
+ const Packet pAlphaReal = pset1<Packet>(alpha.real());
+ const Packet pAlphaImag = pset1<Packet>(alpha.imag());
+ const Packet pMask = bmask<Packet>((const int)(remaining_rows));
+
+ const Scalar* blockA = (Scalar *) blockAc;
+ const Scalar* blockB = (Scalar *) blockBc;
+
+ Index col = 0;
+ for(; col + accRows <= cols; col += accRows)
+ {
+ gemmMMA_complex_cols<Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+
+ gemm_complex_extra_cols<Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+}
+
+#undef accColsC
+#undef advanceRows
+#undef advanceCols
+
+#pragma GCC reset_options
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H
+
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/PacketMath.h
index b3f1ea199..2a440545b 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -22,31 +22,38 @@ namespace internal {
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
-#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
-#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
-#endif
-
// NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#endif
-typedef __vector float Packet4f;
-typedef __vector int Packet4i;
-typedef __vector unsigned int Packet4ui;
-typedef __vector __bool int Packet4bi;
-typedef __vector short int Packet8i;
-typedef __vector unsigned char Packet16uc;
+typedef __vector float Packet4f;
+typedef __vector int Packet4i;
+typedef __vector unsigned int Packet4ui;
+typedef __vector __bool int Packet4bi;
+typedef __vector short int Packet8s;
+typedef __vector unsigned short int Packet8us;
+typedef __vector signed char Packet16c;
+typedef __vector unsigned char Packet16uc;
+typedef eigen_packet_wrapper<__vector unsigned short int,0> Packet8bf;
// We don't want to write the same code all the time, but we need to reuse the constants
// and it doesn't really work to declare them global, so we define macros instead
-
#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
- Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(vec_splat_s32(X))
+ Packet4f p4f_##NAME = {X, X, X, X}
#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \
Packet4i p4i_##NAME = vec_splat_s32(X)
+#define _EIGEN_DECLARE_CONST_FAST_Packet4ui(NAME,X) \
+ Packet4ui p4ui_##NAME = {X, X, X, X}
+
+#define _EIGEN_DECLARE_CONST_FAST_Packet8us(NAME,X) \
+ Packet8us p8us_##NAME = {X, X, X, X, X, X, X, X}
+
+#define _EIGEN_DECLARE_CONST_FAST_Packet16uc(NAME,X) \
+ Packet16uc p16uc_##NAME = {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}
+
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
Packet4f p4f_##NAME = pset1<Packet4f>(X)
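
For reference, on an AltiVec-enabled compiler a use such as _EIGEN_DECLARE_CONST_FAST_Packet8us(ONE, 1) expands to roughly the following (the p<lanes><type>_ name prefix is the file's convention):

    Packet8us p8us_ONE = {1, 1, 1, 1, 1, 1, 1, 1};
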
@@ -64,7 +71,7 @@ typedef __vector unsigned char Packet16uc;
#define DST_CHAN 1
#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))
-
+#define __UNPACK_TYPE__(PACKETNAME) typename unpacket_traits<PACKETNAME>::type
// These constants are endian-agnostic
static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0}
@@ -72,25 +79,36 @@ static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1); //{ 1, 1, 1, 1}
static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16); //{ -16, -16, -16, -16}
static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1}
+static _EIGEN_DECLARE_CONST_FAST_Packet4ui(SIGN, 0x80000000u);
+static _EIGEN_DECLARE_CONST_FAST_Packet4ui(PREV0DOT5, 0x3EFFFFFFu);
+static _EIGEN_DECLARE_CONST_FAST_Packet8us(ONE,1); //{ 1, 1, 1, 1, 1, 1, 1, 1}
+static _EIGEN_DECLARE_CONST_FAST_Packet16uc(ONE,1);
static Packet4f p4f_MZERO = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1); //{ 0x80000000, 0x80000000, 0x80000000, 0x80000000}
#ifndef __VSX__
static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0); //{ 1.0, 1.0, 1.0, 1.0}
#endif
-static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };
-static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
+static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };
+static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
+static Packet8s p8s_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7 };
+static Packet8us p8us_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+static Packet16c p16c_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+static Packet16uc p16uc_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 };
-static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };
+static Packet16uc p16uc_REVERSE16 = { 14,15, 12,13, 10,11, 8,9, 6,7, 4,5, 2,3, 0,1 };
+static Packet16uc p16uc_REVERSE8 = { 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 };
-// Mask alignment
-#ifdef __PPC64__
-#define _EIGEN_MASK_ALIGNMENT 0xfffffffffffffff0
-#else
-#define _EIGEN_MASK_ALIGNMENT 0xfffffff0
-#endif
+static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };
+static Packet16uc p16uc_DUPLICATE16_HI = { 0,1,0,1, 2,3,2,3, 4,5,4,5, 6,7,6,7 };
+static Packet16uc p16uc_DUPLICATE8_HI = { 0,0, 1,1, 2,2, 3,3, 4,4, 5,5, 6,6, 7,7 };
+static const Packet16uc p16uc_DUPLICATE16_EVEN= { 0,1 ,0,1, 4,5, 4,5, 8,9, 8,9, 12,13, 12,13 };
+static const Packet16uc p16uc_DUPLICATE16_ODD = { 2,3 ,2,3, 6,7, 6,7, 10,11, 10,11, 14,15, 14,15 };
-#define _EIGEN_ALIGNED_PTR(x) ((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT)
+static Packet16uc p16uc_QUADRUPLICATE16_HI = { 0,1,0,1,0,1,0,1, 2,3,2,3,2,3,2,3 };
// Handle endianness properly while loading constants
// Define global static constants:
@@ -103,7 +121,7 @@ static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4u
static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};
#else
-static Packet16uc p16uc_FORWARD = p16uc_REVERSE32;
+static Packet16uc p16uc_FORWARD = p16uc_REVERSE32;
static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
static Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
@@ -129,27 +147,27 @@ static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_L
#define EIGEN_PPC_PREFETCH(ADDR) asm( " dcbt [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
#endif
-template<> struct packet_traits<float> : default_packet_traits
-{
+template <>
+struct packet_traits<float> : default_packet_traits {
typedef Packet4f type;
typedef Packet4f half;
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
- size=4,
+ size = 4,
HasHalfPacket = 1,
- HasAdd = 1,
- HasSub = 1,
- HasMul = 1,
- HasDiv = 1,
- HasMin = 1,
- HasMax = 1,
- HasAbs = 1,
- HasSin = 0,
- HasCos = 0,
- HasLog = 0,
- HasExp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasAbs = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
#ifdef __VSX__
HasSqrt = 1,
#if !EIGEN_COMP_CLANG
@@ -160,16 +178,62 @@ template<> struct packet_traits<float> : default_packet_traits
#else
HasSqrt = 0,
HasRsqrt = 0,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
#endif
HasRound = 1,
HasFloor = 1,
HasCeil = 1,
+ HasRint = 1,
HasNegate = 1,
HasBlend = 1
};
};
-template<> struct packet_traits<int> : default_packet_traits
-{
+template <>
+struct packet_traits<bfloat16> : default_packet_traits {
+ typedef Packet8bf type;
+ typedef Packet8bf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasAbs = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
+#ifdef __VSX__
+ HasSqrt = 1,
+#if !EIGEN_COMP_CLANG
+ HasRsqrt = 1,
+#else
+ HasRsqrt = 0,
+#endif
+#else
+ HasSqrt = 0,
+ HasRsqrt = 0,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+#endif
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+ HasNegate = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct packet_traits<int> : default_packet_traits {
typedef Packet4i type;
typedef Packet4i half;
enum {
@@ -178,6 +242,25 @@ template<> struct packet_traits<int> : default_packet_traits
size = 4,
HasHalfPacket = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasDiv = 0,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct packet_traits<short int> : default_packet_traits {
+ typedef Packet8s type;
+ typedef Packet8s half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 0,
+
HasAdd = 1,
HasSub = 1,
HasMul = 1,
@@ -186,9 +269,116 @@ template<> struct packet_traits<int> : default_packet_traits
};
};
+template <>
+struct packet_traits<unsigned short int> : default_packet_traits {
+ typedef Packet8us type;
+ typedef Packet8us half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 0,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct packet_traits<signed char> : default_packet_traits {
+ typedef Packet16c type;
+ typedef Packet16c half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 16,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 0,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct packet_traits<unsigned char> : default_packet_traits {
+ typedef Packet16uc type;
+ typedef Packet16uc half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 16,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 0,
+ HasBlend = 1
+ };
+};
+
+template<> struct unpacket_traits<Packet4f>
+{
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet4i integer_packet;
+ enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet4i>
+{
+ typedef int type;
+ typedef Packet4i half;
+ enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet8s>
+{
+ typedef short int type;
+ typedef Packet8s half;
+ enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet8us>
+{
+ typedef unsigned short int type;
+ typedef Packet8us half;
+ enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+
+template<> struct unpacket_traits<Packet16c>
+{
+ typedef signed char type;
+ typedef Packet16c half;
+ enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet16uc>
+{
+ typedef unsigned char type;
+ typedef Packet16uc half;
+ enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet8bf>
+{
+ typedef bfloat16 type;
+ typedef Packet8bf half;
+ enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+inline std::ostream & operator <<(std::ostream & s, const Packet16c & v)
+{
+ union {
+ Packet16c v;
+ signed char n[16];
+ } vt;
+ vt.v = v;
+ for (int i=0; i< 16; i++)
+ s << vt.n[i] << ", ";
+ return s;
+}
inline std::ostream & operator <<(std::ostream & s, const Packet16uc & v)
{
@@ -198,7 +388,7 @@ inline std::ostream & operator <<(std::ostream & s, const Packet16uc & v)
} vt;
vt.v = v;
for (int i=0; i< 16; i++)
- s << (int)vt.n[i] << ", ";
+ s << vt.n[i] << ", ";
return s;
}
@@ -235,122 +425,366 @@ inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)
return s;
}
-// Need to define them first or we get specialization after instantiation errors
-template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+template <typename Packet>
+EIGEN_STRONG_INLINE Packet pload_common(const __UNPACK_TYPE__(Packet)* from)
{
+ // Some versions of GCC emit "unused-but-set-parameter" warnings here;
+ // silence them for now.
+ EIGEN_UNUSED_VARIABLE(from);
EIGEN_DEBUG_ALIGNED_LOAD
#ifdef __VSX__
- return vec_vsx_ld(0, from);
+ return vec_xl(0, const_cast<__UNPACK_TYPE__(Packet)*>(from));
#else
return vec_ld(0, from);
#endif
}
+// Need to define them first or we get specialization after instantiation errors
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+{
+ return pload_common<Packet4f>(from);
+}
+
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)
{
- EIGEN_DEBUG_ALIGNED_LOAD
-#ifdef __VSX__
- return vec_vsx_ld(0, from);
-#else
- return vec_ld(0, from);
-#endif
+ return pload_common<Packet4i>(from);
}
-template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+template<> EIGEN_STRONG_INLINE Packet8s pload<Packet8s>(const short int* from)
+{
+ return pload_common<Packet8s>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8us pload<Packet8us>(const unsigned short int* from)
{
+ return pload_common<Packet8us>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16c pload<Packet16c>(const signed char* from)
+{
+ return pload_common<Packet16c>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16uc pload<Packet16uc>(const unsigned char* from)
+{
+ return pload_common<Packet16uc>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from)
+{
+ return pload_common<Packet8us>(reinterpret_cast<const unsigned short int*>(from));
+}
+
+template <typename Packet>
+EIGEN_STRONG_INLINE void pstore_common(__UNPACK_TYPE__(Packet)* to, const Packet& from){
+ // Some versions of GCC emit "unused-but-set-parameter" warnings here (for 'to');
+ // silence them for now.
+ EIGEN_UNUSED_VARIABLE(to);
EIGEN_DEBUG_ALIGNED_STORE
#ifdef __VSX__
- vec_vsx_st(from, 0, to);
+ vec_xst(from, 0, to);
#else
vec_st(from, 0, to);
#endif
}
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{
+ pstore_common<Packet4f>(to, from);
+}
+
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from)
{
- EIGEN_DEBUG_ALIGNED_STORE
-#ifdef __VSX__
- vec_vsx_st(from, 0, to);
-#else
- vec_st(from, 0, to);
-#endif
+ pstore_common<Packet4i>(to, from);
}
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
- Packet4f v = {from, from, from, from};
+template<> EIGEN_STRONG_INLINE void pstore<short int>(short int* to, const Packet8s& from)
+{
+ pstore_common<Packet8s>(to, from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<unsigned short int>(unsigned short int* to, const Packet8us& from)
+{
+ pstore_common<Packet8us>(to, from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from)
+{
+ pstore_common<Packet8us>(reinterpret_cast<unsigned short int*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<signed char>(signed char* to, const Packet16c& from)
+{
+ pstore_common<Packet16c>(to, from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<unsigned char>(unsigned char* to, const Packet16uc& from)
+{
+ pstore_common<Packet16uc>(to, from);
+}
+
+template<typename Packet>
+EIGEN_STRONG_INLINE Packet pset1_size4(const __UNPACK_TYPE__(Packet)& from)
+{
+ Packet v = {from, from, from, from};
return v;
}
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) {
- Packet4i v = {from, from, from, from};
+template<typename Packet>
+EIGEN_STRONG_INLINE Packet pset1_size8(const __UNPACK_TYPE__(Packet)& from)
+{
+ Packet v = {from, from, from, from, from, from, from, from};
return v;
}
-template<> EIGEN_STRONG_INLINE void
-pbroadcast4<Packet4f>(const float *a,
- Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+
+template<typename Packet>
+EIGEN_STRONG_INLINE Packet pset1_size16(const __UNPACK_TYPE__(Packet)& from)
+{
+ Packet v = {from, from, from, from, from, from, from, from, from, from, from, from, from, from, from, from};
+ return v;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
+ return pset1_size4<Packet4f>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) {
+ return pset1_size4<Packet4i>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8s pset1<Packet8s>(const short int& from) {
+ return pset1_size8<Packet8s>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8us pset1<Packet8us>(const unsigned short int& from) {
+ return pset1_size8<Packet8us>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16c pset1<Packet16c>(const signed char& from) {
+ return pset1_size16<Packet16c>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16uc pset1<Packet16uc>(const unsigned char& from) {
+ return pset1_size16<Packet16uc>(from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) {
+ return reinterpret_cast<Packet4f>(pset1<Packet4i>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
+ return pset1_size8<Packet8us>(reinterpret_cast<const unsigned short int&>(from));
+}
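
A scalar model (assumed semantics) of pset1frombits<Packet4f>: the 32-bit pattern is broadcast unmodified into every float lane, which is what the reinterpret_cast of a pset1<Packet4i> achieves above:

    #include <array>
    #include <cstring>
    std::array<float, 4> pset1frombits_model(unsigned int bits) {
      float f;
      std::memcpy(&f, &bits, sizeof f);  // reinterpret the bit pattern as float
      return {f, f, f, f};
    }
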
+
+template<typename Packet> EIGEN_STRONG_INLINE void
+pbroadcast4_common(const __UNPACK_TYPE__(Packet) *a,
+ Packet& a0, Packet& a1, Packet& a2, Packet& a3)
{
- a3 = pload<Packet4f>(a);
+ a3 = pload<Packet>(a);
a0 = vec_splat(a3, 0);
a1 = vec_splat(a3, 1);
a2 = vec_splat(a3, 2);
a3 = vec_splat(a3, 3);
}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ pbroadcast4_common<Packet4f>(a, a0, a1, a2, a3);
+}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4i>(const int *a,
Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3)
{
- a3 = pload<Packet4i>(a);
- a0 = vec_splat(a3, 0);
- a1 = vec_splat(a3, 1);
- a2 = vec_splat(a3, 2);
- a3 = vec_splat(a3, 3);
+ pbroadcast4_common<Packet4i>(a, a0, a1, a2, a3);
+}
+
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather_common(const __UNPACK_TYPE__(Packet)* from, Index stride)
+{
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[4];
+ a[0] = from[0*stride];
+ a[1] = from[1*stride];
+ a[2] = from[2*stride];
+ a[3] = from[3*stride];
+ return pload<Packet>(a);
}
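
As a scalar model, pgather assembles lane i from from[i*stride]; the pscatter_* helpers further down are the exact inverse, storing lane i to to[i*stride]. A sketch of the 4-lane case (hypothetical helper):

    #include <array>
    #include <cstddef>
    std::array<float, 4> pgather_model(const float* from, std::ptrdiff_t stride) {
      return {from[0 * stride], from[1 * stride],
              from[2 * stride], from[3 * stride]};
    }
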
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
- float EIGEN_ALIGN16 af[4];
- af[0] = from[0*stride];
- af[1] = from[1*stride];
- af[2] = from[2*stride];
- af[3] = from[3*stride];
- return pload<Packet4f>(af);
+ return pgather_common<Packet4f>(from, stride);
}
+
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
- int EIGEN_ALIGN16 ai[4];
- ai[0] = from[0*stride];
- ai[1] = from[1*stride];
- ai[2] = from[2*stride];
- ai[3] = from[3*stride];
- return pload<Packet4i>(ai);
+ return pgather_common<Packet4i>(from, stride);
+}
+
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather_size8(const __UNPACK_TYPE__(Packet)* from, Index stride)
+{
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[8];
+ a[0] = from[0*stride];
+ a[1] = from[1*stride];
+ a[2] = from[2*stride];
+ a[3] = from[3*stride];
+ a[4] = from[4*stride];
+ a[5] = from[5*stride];
+ a[6] = from[6*stride];
+ a[7] = from[7*stride];
+ return pload<Packet>(a);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet8s pgather<short int, Packet8s>(const short int* from, Index stride)
+{
+ return pgather_size8<Packet8s>(from, stride);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet8us pgather<unsigned short int, Packet8us>(const unsigned short int* from, Index stride)
+{
+ return pgather_size8<Packet8us>(from, stride);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride)
+{
+ return pgather_size8<Packet8bf>(from, stride);
}
+
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather_size16(const __UNPACK_TYPE__(Packet)* from, Index stride)
+{
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[16];
+ a[0] = from[0*stride];
+ a[1] = from[1*stride];
+ a[2] = from[2*stride];
+ a[3] = from[3*stride];
+ a[4] = from[4*stride];
+ a[5] = from[5*stride];
+ a[6] = from[6*stride];
+ a[7] = from[7*stride];
+ a[8] = from[8*stride];
+ a[9] = from[9*stride];
+ a[10] = from[10*stride];
+ a[11] = from[11*stride];
+ a[12] = from[12*stride];
+ a[13] = from[13*stride];
+ a[14] = from[14*stride];
+ a[15] = from[15*stride];
+ return pload<Packet>(a);
+}
+
+
+template<> EIGEN_DEVICE_FUNC inline Packet16c pgather<signed char, Packet16c>(const signed char* from, Index stride)
+{
+ return pgather_size16<Packet16c>(from, stride);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet16uc pgather<unsigned char, Packet16uc>(const unsigned char* from, Index stride)
+{
+ return pgather_size16<Packet16uc>(from, stride);
+}
+
+template<typename Packet> EIGEN_DEVICE_FUNC inline void pscatter_size4(__UNPACK_TYPE__(Packet)* to, const Packet& from, Index stride)
+{
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[4];
+ pstore<__UNPACK_TYPE__(Packet)>(a, from);
+ to[0*stride] = a[0];
+ to[1*stride] = a[1];
+ to[2*stride] = a[2];
+ to[3*stride] = a[3];
+}
+
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
- float EIGEN_ALIGN16 af[4];
- pstore<float>(af, from);
- to[0*stride] = af[0];
- to[1*stride] = af[1];
- to[2*stride] = af[2];
- to[3*stride] = af[3];
+ pscatter_size4<Packet4f>(to, from, stride);
}
+
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
- int EIGEN_ALIGN16 ai[4];
- pstore<int>((int *)ai, from);
- to[0*stride] = ai[0];
- to[1*stride] = ai[1];
- to[2*stride] = ai[2];
- to[3*stride] = ai[3];
+ pscatter_size4<Packet4i>(to, from, stride);
+}
+
+template<typename Packet> EIGEN_DEVICE_FUNC inline void pscatter_size8(__UNPACK_TYPE__(Packet)* to, const Packet& from, Index stride)
+{
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[8];
+ pstore<__UNPACK_TYPE__(Packet)>(a, from);
+ to[0*stride] = a[0];
+ to[1*stride] = a[1];
+ to[2*stride] = a[2];
+ to[3*stride] = a[3];
+ to[4*stride] = a[4];
+ to[5*stride] = a[5];
+ to[6*stride] = a[6];
+ to[7*stride] = a[7];
}
-template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return pset1<Packet4f>(a) + p4f_COUNTDOWN; }
-template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return pset1<Packet4i>(a) + p4i_COUNTDOWN; }
-template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return a + b; }
-template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return a + b; }
+template<> EIGEN_DEVICE_FUNC inline void pscatter<short int, Packet8s>(short int* to, const Packet8s& from, Index stride)
+{
+ pscatter_size8<Packet8s>(to, from, stride);
+}
-template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return a - b; }
-template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return a - b; }
+template<> EIGEN_DEVICE_FUNC inline void pscatter<unsigned short int, Packet8us>(unsigned short int* to, const Packet8us& from, Index stride)
+{
+ pscatter_size8<Packet8us>(to, from, stride);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride)
+{
+ pscatter_size8<Packet8bf>(to, from, stride);
+}
+
+template<typename Packet> EIGEN_DEVICE_FUNC inline void pscatter_size16(__UNPACK_TYPE__(Packet)* to, const Packet& from, Index stride)
+{
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[16];
+ pstore<__UNPACK_TYPE__(Packet)>(a, from);
+ to[0*stride] = a[0];
+ to[1*stride] = a[1];
+ to[2*stride] = a[2];
+ to[3*stride] = a[3];
+ to[4*stride] = a[4];
+ to[5*stride] = a[5];
+ to[6*stride] = a[6];
+ to[7*stride] = a[7];
+ to[8*stride] = a[8];
+ to[9*stride] = a[9];
+ to[10*stride] = a[10];
+ to[11*stride] = a[11];
+ to[12*stride] = a[12];
+ to[13*stride] = a[13];
+ to[14*stride] = a[14];
+ to[15*stride] = a[15];
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<signed char, Packet16c>(signed char* to, const Packet16c& from, Index stride)
+{
+ pscatter_size16<Packet16c>(to, from, stride);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<unsigned char, Packet16uc>(unsigned char* to, const Packet16uc& from, Index stride)
+{
+ pscatter_size16<Packet16uc>(to, from, stride);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return pset1<Packet4f>(a) + p4f_COUNTDOWN; }
+template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return pset1<Packet4i>(a) + p4i_COUNTDOWN; }
+template<> EIGEN_STRONG_INLINE Packet8s plset<Packet8s>(const short int& a) { return pset1<Packet8s>(a) + p8s_COUNTDOWN; }
+template<> EIGEN_STRONG_INLINE Packet8us plset<Packet8us>(const unsigned short int& a) { return pset1<Packet8us>(a) + p8us_COUNTDOWN; }
+template<> EIGEN_STRONG_INLINE Packet16c plset<Packet16c>(const signed char& a) { return pset1<Packet16c>(a) + p16c_COUNTDOWN; }
+template<> EIGEN_STRONG_INLINE Packet16uc plset<Packet16uc>(const unsigned char& a) { return pset1<Packet16uc>(a) + p16uc_COUNTDOWN; }
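
The plset specializations compose pset1 with the *_COUNTDOWN constants, so lane i holds a + i. A scalar model of the 4-lane case (hypothetical helper):

    #include <array>
    std::array<float, 4> plset_model(float a) {
      return {a, a + 1.0f, a + 2.0f, a + 3.0f};  // pset1(a) + {0,1,2,3}
    }
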
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f> (const Packet4f& a, const Packet4f& b) { return a + b; }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i> (const Packet4i& a, const Packet4i& b) { return a + b; }
+template<> EIGEN_STRONG_INLINE Packet4ui padd<Packet4ui> (const Packet4ui& a, const Packet4ui& b) { return a + b; }
+template<> EIGEN_STRONG_INLINE Packet8s padd<Packet8s> (const Packet8s& a, const Packet8s& b) { return a + b; }
+template<> EIGEN_STRONG_INLINE Packet8us padd<Packet8us> (const Packet8us& a, const Packet8us& b) { return a + b; }
+template<> EIGEN_STRONG_INLINE Packet16c padd<Packet16c> (const Packet16c& a, const Packet16c& b) { return a + b; }
+template<> EIGEN_STRONG_INLINE Packet16uc padd<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return a + b; }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f> (const Packet4f& a, const Packet4f& b) { return a - b; }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i> (const Packet4i& a, const Packet4i& b) { return a - b; }
+template<> EIGEN_STRONG_INLINE Packet8s psub<Packet8s> (const Packet8s& a, const Packet8s& b) { return a - b; }
+template<> EIGEN_STRONG_INLINE Packet8us psub<Packet8us> (const Packet8us& a, const Packet8us& b) { return a - b; }
+template<> EIGEN_STRONG_INLINE Packet16c psub<Packet16c> (const Packet16c& a, const Packet16c& b) { return a - b; }
+template<> EIGEN_STRONG_INLINE Packet16uc psub<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return a - b; }
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return p4f_ZERO - a; }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return p4i_ZERO - a; }
@@ -358,8 +792,13 @@ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return p4i_
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
-template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b, p4f_MZERO); }
-template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return a * b; }
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_madd(a,b, p4f_MZERO); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i> (const Packet4i& a, const Packet4i& b) { return a * b; }
+template<> EIGEN_STRONG_INLINE Packet8s pmul<Packet8s> (const Packet8s& a, const Packet8s& b) { return vec_mul(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmul<Packet8us> (const Packet8us& a, const Packet8us& b) { return vec_mul(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmul<Packet16c> (const Packet16c& a, const Packet16c& b) { return vec_mul(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmul<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vec_mul(a,b); }
+
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
@@ -387,85 +826,247 @@ template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, co
// For some weird reason, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return a*b + c; }
+template<> EIGEN_STRONG_INLINE Packet8s pmadd(const Packet8s& a, const Packet8s& b, const Packet8s& c) { return vec_madd(a,b,c); }
+template<> EIGEN_STRONG_INLINE Packet8us pmadd(const Packet8us& a, const Packet8us& b, const Packet8us& c) { return vec_madd(a,b,c); }
-template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ #ifdef __VSX__
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
+ Packet4f ret;
+ __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+ #else
+ return vec_min(a, b);
+ #endif
+}
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmin<Packet8s>(const Packet8s& a, const Packet8s& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmin<Packet8us>(const Packet8us& a, const Packet8us& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmin<Packet16c>(const Packet16c& a, const Packet16c& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmin<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vec_min(a, b); }
+
-template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ #ifdef __VSX__
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
+ Packet4f ret;
+ __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+ #else
+ return vec_max(a, b);
+ #endif
+}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmax<Packet8s>(const Packet8s& a, const Packet8s& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmax<Packet8us>(const Packet8us& a, const Packet8us& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmax<Packet16c>(const Packet16c& a, const Packet16c& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmax<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vec_max(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return reinterpret_cast<Packet4f>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return reinterpret_cast<Packet4f>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return reinterpret_cast<Packet4f>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) {
+ Packet4f c = reinterpret_cast<Packet4f>(vec_cmpge(a,b));
+ return vec_nor(c,c);
+}
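
pcmp_lt_or_nan negates vec_cmpge, so the per-lane predicate holds when a < b or the pair is unordered. A scalar model (the packet version returns an all-ones/all-zeros lane mask rather than a bool):

    bool lt_or_nan(float a, float b) {
      return !(a >= b);  // false only when a >= b holds; a NaN makes it true
    }
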
+
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return reinterpret_cast<Packet4i>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return reinterpret_cast<Packet4i>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return reinterpret_cast<Packet4i>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8s pcmp_le(const Packet8s& a, const Packet8s& b) { return reinterpret_cast<Packet8s>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8s pcmp_lt(const Packet8s& a, const Packet8s& b) { return reinterpret_cast<Packet8s>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8s pcmp_eq(const Packet8s& a, const Packet8s& b) { return reinterpret_cast<Packet8s>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8us pcmp_le(const Packet8us& a, const Packet8us& b) { return reinterpret_cast<Packet8us>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8us pcmp_lt(const Packet8us& a, const Packet8us& b) { return reinterpret_cast<Packet8us>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8us pcmp_eq(const Packet8us& a, const Packet8us& b) { return reinterpret_cast<Packet8us>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16c pcmp_le(const Packet16c& a, const Packet16c& b) { return reinterpret_cast<Packet16c>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16c pcmp_lt(const Packet16c& a, const Packet16c& b) { return reinterpret_cast<Packet16c>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16c pcmp_eq(const Packet16c& a, const Packet16c& b) { return reinterpret_cast<Packet16c>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16uc pcmp_le(const Packet16uc& a, const Packet16uc& b) { return reinterpret_cast<Packet16uc>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16uc pcmp_lt(const Packet16uc& a, const Packet16uc& b) { return reinterpret_cast<Packet16uc>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16uc pcmp_eq(const Packet16uc& a, const Packet16uc& b) { return reinterpret_cast<Packet16uc>(vec_cmpeq(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pand<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8us pand<Packet8us>(const Packet8us& a, const Packet8us& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8bf pand<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return pand<Packet8us>(a, b);
+}
+
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8s por<Packet8s>(const Packet8s& a, const Packet8s& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8us por<Packet8us>(const Packet8us& a, const Packet8us& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8bf por<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return por<Packet8us>(a, b);
+}
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8bf pxor<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ return pxor<Packet8us>(a, b);
+}
-template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_andc(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_andc(a, b); }
-template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return vec_round(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
+ return vec_sel(b, a, reinterpret_cast<Packet4ui>(mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
+{
+ Packet4f t = vec_add(reinterpret_cast<Packet4f>(vec_or(vec_and(reinterpret_cast<Packet4ui>(a), p4ui_SIGN), p4ui_PREV0DOT5)), a);
+ Packet4f res;
+
+#ifdef __VSX__
+ __asm__("xvrspiz %x0, %x1\n\t"
+ : "=&wa" (res)
+ : "wa" (t));
+#else
+ __asm__("vrfiz %0, %1\n\t"
+ : "=v" (res)
+ : "v" (t));
+#endif
+
+ return res;
+}
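
The constant p4ui_PREV0DOT5 (0x3EFFFFFF) is the largest float below 0.5; ORing in a's sign bit and adding before truncating gives round-half-away-from-zero without the classic bug where adding exactly 0.5 rounds values just below 0.5 up to 1. A scalar model of the trick (hypothetical helper, verified on the 0.49999997f edge case):

    #include <array>
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    float pround_model(float a) {
      std::uint32_t bits;
      std::memcpy(&bits, &a, sizeof bits);
      std::uint32_t h_bits = 0x3EFFFFFFu | (bits & 0x80000000u);  // +/-(0.5 - ulp)
      float h;
      std::memcpy(&h, &h_bits, sizeof h);
      return std::trunc(a + h);  // truncate toward zero, as xvrspiz/vrfiz do
    }
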
template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return vec_ceil(a); }
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return vec_floor(a); }
+template<> EIGEN_STRONG_INLINE Packet4f print<Packet4f>(const Packet4f& a)
+{
+ Packet4f res;
-#ifdef _BIG_ENDIAN
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+ __asm__("xvrspic %x0, %x1\n\t"
+ : "=&wa" (res)
+ : "wa" (a));
+
+ return res;
+}
+
+template<typename Packet> EIGEN_STRONG_INLINE Packet ploadu_common(const __UNPACK_TYPE__(Packet)* from)
{
EIGEN_DEBUG_ALIGNED_LOAD
+#ifdef _BIG_ENDIAN
Packet16uc MSQ, LSQ;
Packet16uc mask;
MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
mask = vec_lvsl(0, from); // create the permute mask
- return (Packet4f) vec_perm(MSQ, LSQ, mask); // align the data
+ //TODO: Add static_cast here
+ return (Packet) vec_perm(MSQ, LSQ, mask); // align the data
+#else
+ EIGEN_DEBUG_UNALIGNED_LOAD
+ return vec_xl(0, const_cast<__UNPACK_TYPE__(Packet)*>(from));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+ return ploadu_common<Packet4f>(from);
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
- EIGEN_DEBUG_ALIGNED_LOAD
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- Packet16uc MSQ, LSQ;
- Packet16uc mask;
- MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
- mask = vec_lvsl(0, from); // create the permute mask
- return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data
+ return ploadu_common<Packet4i>(from);
}
-#else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+template<> EIGEN_STRONG_INLINE Packet8s ploadu<Packet8s>(const short int* from)
{
- EIGEN_DEBUG_UNALIGNED_LOAD
- return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));
+ return ploadu_common<Packet8s>(from);
}
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+template<> EIGEN_STRONG_INLINE Packet8us ploadu<Packet8us>(const unsigned short int* from)
{
- EIGEN_DEBUG_UNALIGNED_LOAD
- return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));
+ return ploadu_common<Packet8us>(from);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from)
+{
+ return ploadu_common<Packet8us>(reinterpret_cast<const unsigned short int*>(from));
+}
+template<> EIGEN_STRONG_INLINE Packet16c ploadu<Packet16c>(const signed char* from)
+{
+ return ploadu_common<Packet16c>(from);
+}
+template<> EIGEN_STRONG_INLINE Packet16uc ploadu<Packet16uc>(const unsigned char* from)
+{
+ return ploadu_common<Packet16uc>(from);
}
-#endif
-template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+template<typename Packet> EIGEN_STRONG_INLINE Packet ploaddup_common(const __UNPACK_TYPE__(Packet)* from)
{
- Packet4f p;
- if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet4f>(from);
- else p = ploadu<Packet4f>(from);
+ Packet p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet>(from);
+ else p = ploadu<Packet>(from);
return vec_perm(p, p, p16uc_DUPLICATE32_HI);
}
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ return ploaddup_common<Packet4f>(from);
+}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
- Packet4i p;
- if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet4i>(from);
- else p = ploadu<Packet4i>(from);
- return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+ return ploaddup_common<Packet4i>(from);
}
-#ifdef _BIG_ENDIAN
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
+template<> EIGEN_STRONG_INLINE Packet8s ploaddup<Packet8s>(const short int* from)
+{
+ Packet8s p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet8s>(from);
+ else p = ploadu<Packet8s>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE16_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8us ploaddup<Packet8us>(const unsigned short int* from)
+{
+ Packet8us p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet8us>(from);
+ else p = ploadu<Packet8us>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE16_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8s ploadquad<Packet8s>(const short int* from)
+{
+ Packet8s p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet8s>(from);
+ else p = ploadu<Packet8s>(from);
+ return vec_perm(p, p, p16uc_QUADRUPLICATE16_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8us ploadquad<Packet8us>(const unsigned short int* from)
+{
+ Packet8us p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet8us>(from);
+ else p = ploadu<Packet8us>(from);
+ return vec_perm(p, p, p16uc_QUADRUPLICATE16_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf ploadquad<Packet8bf>(const bfloat16* from)
+{
+ return ploadquad<Packet8us>(reinterpret_cast<const unsigned short int*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet16c ploaddup<Packet16c>(const signed char* from)
+{
+ Packet16c p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet16c>(from);
+ else p = ploadu<Packet16c>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE8_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16uc ploaddup<Packet16uc>(const unsigned char* from)
+{
+ Packet16uc p;
+ if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet16uc>(from);
+ else p = ploadu<Packet16uc>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE8_HI);
+}
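
The DUPLICATE/QUADRUPLICATE permute masks implement element replication. A scalar model (assumed semantics) for the 8-lane short packets: ploaddup reads four values and repeats each twice, ploadquad reads two and repeats each four times:

    #include <array>
    std::array<short, 8> ploaddup_model(const short* from) {
      return {from[0], from[0], from[1], from[1],
              from[2], from[2], from[3], from[3]};
    }
    std::array<short, 8> ploadquad_model(const short* from) {
      return {from[0], from[0], from[0], from[0],
              from[1], from[1], from[1], from[1]};
    }
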
+
+template<typename Packet> EIGEN_STRONG_INLINE void pstoreu_common(__UNPACK_TYPE__(Packet)* to, const Packet& from)
{
EIGEN_DEBUG_UNALIGNED_STORE
+#ifdef _BIG_ENDIAN
// Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
// Warning: not thread safe!
Packet16uc MSQ, LSQ, edges;
@@ -479,45 +1080,69 @@ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& f
MSQ = vec_perm(edges,(Packet16uc)from,align); // misalign the data (MSQ)
LSQ = vec_perm((Packet16uc)from,edges,align); // misalign the data (LSQ)
vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
- vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
+ vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part second
+#else
+ vec_xst(from, 0, to);
+#endif
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
+{
+ pstoreu_common<Packet4f>(to, from);
}
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
{
- EIGEN_DEBUG_UNALIGNED_STORE
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- // Warning: not thread safe!
- Packet16uc MSQ, LSQ, edges;
- Packet16uc edgeAlign, align;
-
- MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
- edgeAlign = vec_lvsl(0, to); // permute map to extract edges
- edges=vec_perm(LSQ, MSQ, edgeAlign); // extract the edges
- align = vec_lvsr( 0, to ); // permute map to misalign data
- MSQ = vec_perm(edges, (Packet16uc) from, align); // misalign the data (MSQ)
- LSQ = vec_perm((Packet16uc) from, edges, align); // misalign the data (LSQ)
- vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
- vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
+ pstoreu_common<Packet4i>(to, from);
}
-#else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
-template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
+template<> EIGEN_STRONG_INLINE void pstoreu<short int>(short int* to, const Packet8s& from)
{
- EIGEN_DEBUG_ALIGNED_STORE
- vec_vsx_st(from, (long)to & 15, (int*) _EIGEN_ALIGNED_PTR(to));
+ pstoreu_common<Packet8s>(to, from);
}
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
+template<> EIGEN_STRONG_INLINE void pstoreu<unsigned short int>(unsigned short int* to, const Packet8us& from)
{
- EIGEN_DEBUG_ALIGNED_STORE
- vec_vsx_st(from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));
+ pstoreu_common<Packet8us>(to, from);
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from)
+{
+ pstoreu_common<Packet8us>(reinterpret_cast<unsigned short int*>(to), from);
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<signed char>(signed char* to, const Packet16c& from)
+{
+ pstoreu_common<Packet16c>(to, from);
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<unsigned char>(unsigned char* to, const Packet16uc& from)
+{
+ pstoreu_common<Packet16uc>(to, from);
}
-#endif
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_PPC_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_PPC_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }
-template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x; vec_ste(a, 0, &x); return x; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { EIGEN_ALIGN16 int x; vec_ste(a, 0, &x); return x; }
+
+template<typename Packet> EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) pfirst_common(const Packet& a) {
+ EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) x;
+ vec_ste(a, 0, &x);
+ return x;
+}
+
+template<> EIGEN_STRONG_INLINE short int pfirst<Packet8s>(const Packet8s& a) {
+ return pfirst_common<Packet8s>(a);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned short int pfirst<Packet8us>(const Packet8us& a) {
+ return pfirst_common<Packet8us>(a);
+}
+
+template<> EIGEN_STRONG_INLINE signed char pfirst<Packet16c>(const Packet16c& a)
+{
+ return pfirst_common<Packet16c>(a);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned char pfirst<Packet16uc>(const Packet16uc& a)
+{
+ return pfirst_common<Packet16uc>(a);
+}
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{
@@ -525,10 +1150,296 @@ template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
}
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{
- return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32)); }
+ return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+}
+template<> EIGEN_STRONG_INLINE Packet8s preverse(const Packet8s& a)
+{
+ return reinterpret_cast<Packet8s>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE16));
+}
+template<> EIGEN_STRONG_INLINE Packet8us preverse(const Packet8us& a)
+{
+ return reinterpret_cast<Packet8us>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE16));
+}
+template<> EIGEN_STRONG_INLINE Packet16c preverse(const Packet16c& a)
+{
+ return vec_perm(a, a, p16uc_REVERSE8);
+}
+template<> EIGEN_STRONG_INLINE Packet16uc preverse(const Packet16uc& a)
+{
+ return vec_perm(a, a, p16uc_REVERSE8);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a)
+{
+ return preverse<Packet8us>(a);
+}
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet8s pabs(const Packet8s& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet8us pabs(const Packet8us& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet16c pabs(const Packet16c& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet16uc pabs(const Packet16uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
+ _EIGEN_DECLARE_CONST_FAST_Packet8us(abs_mask,0x7FFF);
+ return pand<Packet8us>(p8us_abs_mask, a);
+}
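+// For bfloat16, pabs only has to clear the sign bit of each 16-bit lane;
+// e.g. bits(-1.5bf) == 0xBFC0 and 0xBFC0 & 0x7FFF == 0x3FC0 == bits(1.5bf).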
+
+template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a)
+{ return vec_sra(a,reinterpret_cast<Packet4ui>(pset1<Packet4i>(N))); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right(const Packet4i& a)
+{ return vec_sr(a,reinterpret_cast<Packet4ui>(pset1<Packet4i>(N))); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left(const Packet4i& a)
+{ return vec_sl(a,reinterpret_cast<Packet4ui>(pset1<Packet4i>(N))); }
+template<int N> EIGEN_STRONG_INLINE Packet4f plogical_shift_left(const Packet4f& a)
+{
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N);
+ Packet4ui r = vec_sl(reinterpret_cast<Packet4ui>(a), p4ui_mask);
+ return reinterpret_cast<Packet4f>(r);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet4f plogical_shift_right(const Packet4f& a)
+{
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N);
+ Packet4ui r = vec_sr(reinterpret_cast<Packet4ui>(a), p4ui_mask);
+ return reinterpret_cast<Packet4f>(r);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_right(const Packet4ui& a)
+{
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N);
+ return vec_sr(a, p4ui_mask);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_left(const Packet4ui& a)
+{
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N);
+ return vec_sl(a, p4ui_mask);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet8us plogical_shift_left(const Packet8us& a)
+{
+ const _EIGEN_DECLARE_CONST_FAST_Packet8us(mask, N);
+ return vec_sl(a, p8us_mask);
+}
+template<int N> EIGEN_STRONG_INLINE Packet8us plogical_shift_right(const Packet8us& a)
+{
+ const _EIGEN_DECLARE_CONST_FAST_Packet8us(mask, N);
+ return vec_sr(a, p8us_mask);
+}
+
+EIGEN_STRONG_INLINE Packet4f Bf16ToF32Even(const Packet8bf& bf){
+ return plogical_shift_left<16>(reinterpret_cast<Packet4f>(bf.m_val));
+}
+
+EIGEN_STRONG_INLINE Packet4f Bf16ToF32Odd(const Packet8bf& bf){
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(high_mask, 0xFFFF0000);
+ return pand<Packet4f>(
+ reinterpret_cast<Packet4f>(bf.m_val),
+ reinterpret_cast<Packet4f>(p4ui_high_mask)
+ );
+}
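+// A bfloat16 is the upper 16 bits of the equivalent binary32, so widening a
+// lane is a 16-bit shift; e.g. 0x3FC0 (1.5 as bfloat16) << 16 == 0x3FC00000
+// == 1.5f. On the little-endian layout targeted here the even-indexed lanes
+// sit in the low half of each 32-bit word and get shifted up, while the odd
+// lanes already sit in the high half and are simply masked in place.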
+
+// Simple interleaving of bool masks, prevents true values from being
+// converted to NaNs.
+EIGEN_STRONG_INLINE Packet8bf F32ToBf16Bool(Packet4f even, Packet4f odd) {
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(high_mask, 0xFFFF0000);
+ Packet4f bf_odd, bf_even;
+ bf_odd = pand(reinterpret_cast<Packet4f>(p4ui_high_mask), odd);
+ bf_even = plogical_shift_right<16>(even);
+ return reinterpret_cast<Packet8us>(por<Packet4f>(bf_even, bf_odd));
+}
+
+EIGEN_STRONG_INLINE Packet8bf F32ToBf16(Packet4f p4f){
+ Packet4ui input = reinterpret_cast<Packet4ui>(p4f);
+ Packet4ui lsb = plogical_shift_right<16>(input);
+ lsb = pand<Packet4ui>(lsb, reinterpret_cast<Packet4ui>(p4i_ONE));
+
+ _EIGEN_DECLARE_CONST_FAST_Packet4ui(BIAS,0x7FFFu);
+ Packet4ui rounding_bias = padd<Packet4ui>(lsb, p4ui_BIAS);
+ input = padd<Packet4ui>(input, rounding_bias);
+
+ //Test NaN and Subnormal - Begin
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(exp_mask, 0x7F800000);
+ Packet4ui exp = pand<Packet4ui>(p4ui_exp_mask, reinterpret_cast<Packet4ui>(p4f));
+
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mantissa_mask, 0x7FFFFF);
+ Packet4ui mantissa = pand<Packet4ui>(p4ui_mantissa_mask, reinterpret_cast<Packet4ui>(p4f));
+
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(max_exp, 0x7F800000);
+ Packet4bi is_max_exp = vec_cmpeq(exp, p4ui_max_exp);
+ Packet4bi is_zero_exp = vec_cmpeq(exp, reinterpret_cast<Packet4ui>(p4i_ZERO));
+
+ Packet4bi is_mant_zero = vec_cmpeq(mantissa, reinterpret_cast<Packet4ui>(p4i_ZERO));
+ Packet4ui nan_selector = pandnot<Packet4ui>(
+ reinterpret_cast<Packet4ui>(is_max_exp),
+ reinterpret_cast<Packet4ui>(is_mant_zero)
+ );
+
+ Packet4ui subnormal_selector = pandnot<Packet4ui>(
+ reinterpret_cast<Packet4ui>(is_zero_exp),
+ reinterpret_cast<Packet4ui>(is_mant_zero)
+ );
+
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(nan, 0x7FC00000);
+ input = vec_sel(input, p4ui_nan, nan_selector);
+ input = vec_sel(input, reinterpret_cast<Packet4ui>(p4f), subnormal_selector);
+ //Test NaN and Subnormal - End
+
+ input = plogical_shift_right<16>(input);
+ return reinterpret_cast<Packet8us>(input);
+}
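+// The bias above implements round-to-nearest-even on the 16 discarded
+// mantissa bits: adding 0x7FFF rounds up only when the discarded half
+// exceeds 0x8000, and the extra lsb breaks the exact 0x8000 tie toward an
+// even result. NaN inputs are forced to the quiet pattern 0x7FC00000 so the
+// bias cannot corrupt their payload, and subnormal inputs pass through
+// truncated.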
+
+EIGEN_STRONG_INLINE Packet8bf F32ToBf16(Packet4f even, Packet4f odd){
+ Packet4f bf_odd, bf_even;
+ bf_odd = reinterpret_cast<Packet4f>(F32ToBf16(odd).m_val);
+ bf_odd = plogical_shift_left<16>(bf_odd);
+ bf_even = reinterpret_cast<Packet4f>(F32ToBf16(even).m_val);
+ return reinterpret_cast<Packet8us>(por<Packet4f>(bf_even, bf_odd));
+}
+#define BF16_TO_F32_UNARY_OP_WRAPPER(OP, A) \
+ Packet4f a_even = Bf16ToF32Even(A);\
+ Packet4f a_odd = Bf16ToF32Odd(A);\
+ Packet4f op_even = OP(a_even);\
+ Packet4f op_odd = OP(a_odd);\
+ return F32ToBf16(op_even, op_odd);\
+
+#define BF16_TO_F32_BINARY_OP_WRAPPER(OP, A, B) \
+ Packet4f a_even = Bf16ToF32Even(A);\
+ Packet4f a_odd = Bf16ToF32Odd(A);\
+ Packet4f b_even = Bf16ToF32Even(B);\
+ Packet4f b_odd = Bf16ToF32Odd(B);\
+ Packet4f op_even = OP(a_even, b_even);\
+ Packet4f op_odd = OP(a_odd, b_odd);\
+ return F32ToBf16(op_even, op_odd);\
+
+#define BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(OP, A, B) \
+ Packet4f a_even = Bf16ToF32Even(A);\
+ Packet4f a_odd = Bf16ToF32Odd(A);\
+ Packet4f b_even = Bf16ToF32Even(B);\
+ Packet4f b_odd = Bf16ToF32Odd(B);\
+ Packet4f op_even = OP(a_even, b_even);\
+ Packet4f op_odd = OP(a_odd, b_odd);\
+ return F32ToBf16Bool(op_even, op_odd);\
+
+template<> EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER(padd<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER(pmul<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER(pdiv<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pnegate<Packet8bf>(const Packet8bf& a) {
+ BF16_TO_F32_UNARY_OP_WRAPPER(pnegate<Packet4f>, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER(psub<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf psqrt<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(vec_sqrt, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf prsqrt<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(prsqrt<Packet4f>, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pexp<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(pexp_float, a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
+ return pldexp_generic(a,exponent);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pldexp<Packet8bf> (const Packet8bf& a, const Packet8bf& exponent){
+ BF16_TO_F32_BINARY_OP_WRAPPER(pldexp<Packet4f>, a, exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
+ return pfrexp_generic(a,exponent);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pfrexp<Packet8bf> (const Packet8bf& a, Packet8bf& e){
+ Packet4f a_even = Bf16ToF32Even(a);
+ Packet4f a_odd = Bf16ToF32Odd(a);
+ Packet4f e_even;
+ Packet4f e_odd;
+ Packet4f op_even = pfrexp<Packet4f>(a_even, e_even);
+ Packet4f op_odd = pfrexp<Packet4f>(a_odd, e_odd);
+ e = F32ToBf16(e_even, e_odd);
+ return F32ToBf16(op_even, op_odd);
+}
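+// pfrexp decomposes a into mantissa m in [0.5, 1) and integral exponent e
+// with a == m * 2^e, e.g. 8.0 gives m == 0.5 and e == 4; both halves are
+// computed in float and then repacked to bfloat16.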
+
+template<> EIGEN_STRONG_INLINE Packet8bf psin<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(psin_float, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pcos<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(pcos_float, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf plog<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(plog_float, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(pfloor<Packet4f>, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(pceil<Packet4f>, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(pround<Packet4f>, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf print<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(print<Packet4f>, a);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pmadd(const Packet8bf& a, const Packet8bf& b, const Packet8bf& c) {
+ Packet4f a_even = Bf16ToF32Even(a);
+ Packet4f a_odd = Bf16ToF32Odd(a);
+ Packet4f b_even = Bf16ToF32Even(b);
+ Packet4f b_odd = Bf16ToF32Odd(b);
+ Packet4f c_even = Bf16ToF32Even(c);
+ Packet4f c_odd = Bf16ToF32Odd(c);
+ Packet4f pmadd_even = pmadd<Packet4f>(a_even, b_even, c_even);
+ Packet4f pmadd_odd = pmadd<Packet4f>(a_odd, b_odd, c_odd);
+ return F32ToBf16(pmadd_even, pmadd_odd);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER(pmin<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER(pmax<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_lt<Packet4f>, a, b);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_lt_or_nan<Packet4f>, a, b);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_le<Packet4f>, a, b);
+}
+template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a, const Packet8bf& b) {
+ BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_eq<Packet4f>, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 pfirst(const Packet8bf& a) {
+ return Eigen::bfloat16_impl::raw_uint16_to_bfloat16((pfirst<Packet8us>(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf ploaddup<Packet8bf>(const bfloat16* from)
+{
+ return ploaddup<Packet8us>(reinterpret_cast<const unsigned short int*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
+  EIGEN_ALIGN16 bfloat16 countdown[8] = { bfloat16(0), bfloat16(1), bfloat16(2), bfloat16(3),
+ bfloat16(4), bfloat16(5), bfloat16(6), bfloat16(7) };
+ return padd<Packet8bf>(pset1<Packet8bf>(a), pload<Packet8bf>(countdown));
+}
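+// plset produces an arithmetic ramp starting at a; e.g.
+// plset<Packet8bf>(bfloat16(2)) == {2, 3, 4, 5, 6, 7, 8, 9}, with each lane
+// rounded to bfloat16.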
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
@@ -540,34 +1451,6 @@ template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
return pfirst(sum);
}
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
- Packet4f v[4], sum[4];
-
- // It's easier and faster to transpose then add as columns
- // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
-
- // Now do the summation:
- // Lines 0+1
- sum[0] = sum[0] + sum[1];
- // Lines 2+3
- sum[1] = sum[2] + sum[3];
- // Add the results
- sum[0] = sum[0] + sum[1];
-
- return sum[0];
-}
-
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
Packet4i sum;
@@ -580,32 +1463,69 @@ template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
return pfirst(sum);
}
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a)
{
- Packet4i v[4], sum[4];
+ float redux_even = predux<Packet4f>(Bf16ToF32Even(a));
+ float redux_odd = predux<Packet4f>(Bf16ToF32Odd(a));
+ float f32_result = redux_even + redux_odd;
+ return bfloat16(f32_result);
+}
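+// predux sums all eight lanes; the two four-lane partial sums are combined
+// in float before the final narrowing, e.g. predux on {1,2,...,8} returns 36.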
+template<typename Packet> EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) predux_size8(const Packet& a)
+{
+ union{
+ Packet v;
+ __UNPACK_TYPE__(Packet) n[8];
+ } vt;
+ vt.v = a;
+
+ EIGEN_ALIGN16 int first_loader[4] = { vt.n[0], vt.n[1], vt.n[2], vt.n[3] };
+ EIGEN_ALIGN16 int second_loader[4] = { vt.n[4], vt.n[5], vt.n[6], vt.n[7] };
+ Packet4i first_half = pload<Packet4i>(first_loader);
+ Packet4i second_half = pload<Packet4i>(second_loader);
+
+ return static_cast<__UNPACK_TYPE__(Packet)>(predux(first_half) + predux(second_half));
+}
+
+template<> EIGEN_STRONG_INLINE short int predux<Packet8s>(const Packet8s& a)
+{
+ return predux_size8<Packet8s>(a);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned short int predux<Packet8us>(const Packet8us& a)
+{
+ return predux_size8<Packet8us>(a);
+}
+
+template<typename Packet> EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) predux_size16(const Packet& a)
+{
+ union{
+ Packet v;
+ __UNPACK_TYPE__(Packet) n[16];
+ } vt;
+ vt.v = a;
+
+ EIGEN_ALIGN16 int first_loader[4] = { vt.n[0], vt.n[1], vt.n[2], vt.n[3] };
+ EIGEN_ALIGN16 int second_loader[4] = { vt.n[4], vt.n[5], vt.n[6], vt.n[7] };
+ EIGEN_ALIGN16 int third_loader[4] = { vt.n[8], vt.n[9], vt.n[10], vt.n[11] };
+ EIGEN_ALIGN16 int fourth_loader[4] = { vt.n[12], vt.n[13], vt.n[14], vt.n[15] };
- // It's easier and faster to transpose then add as columns
- // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
+ Packet4i first_quarter = pload<Packet4i>(first_loader);
+ Packet4i second_quarter = pload<Packet4i>(second_loader);
+ Packet4i third_quarter = pload<Packet4i>(third_loader);
+ Packet4i fourth_quarter = pload<Packet4i>(fourth_loader);
- // Now do the summation:
- // Lines 0+1
- sum[0] = sum[0] + sum[1];
- // Lines 2+3
- sum[1] = sum[2] + sum[3];
- // Add the results
- sum[0] = sum[0] + sum[1];
+ return static_cast<__UNPACK_TYPE__(Packet)>(predux(first_quarter) + predux(second_quarter)
+ + predux(third_quarter) + predux(fourth_quarter));
+}
+
+template<> EIGEN_STRONG_INLINE signed char predux<Packet16c>(const Packet16c& a)
+{
+ return predux_size16<Packet16c>(a);
+}
- return sum[0];
+template<> EIGEN_STRONG_INLINE unsigned char predux<Packet16uc>(const Packet16uc& a)
+{
+ return predux_size16<Packet16uc>(a);
}
// Other reduction functions:
@@ -624,97 +1544,255 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
return aux[0] * aux[1] * aux[2] * aux[3];
}
+template<> EIGEN_STRONG_INLINE short int predux_mul<Packet8s>(const Packet8s& a)
+{
+ Packet8s pair, quad, octo;
+
+ pair = vec_mul(a, vec_sld(a, a, 8));
+ quad = vec_mul(pair, vec_sld(pair, pair, 4));
+ octo = vec_mul(quad, vec_sld(quad, quad, 2));
+
+ return pfirst(octo);
+}
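+// vec_sld(a, a, 8) rotates the vector by 8 bytes so that lane i is paired
+// with lane i+4; each multiply therefore halves the number of live lanes,
+// folding eight lanes into lane 0 in three steps (8 -> 4 -> 2 -> 1).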
+
+template<> EIGEN_STRONG_INLINE unsigned short int predux_mul<Packet8us>(const Packet8us& a)
+{
+ Packet8us pair, quad, octo;
+
+ pair = vec_mul(a, vec_sld(a, a, 8));
+ quad = vec_mul(pair, vec_sld(pair, pair, 4));
+ octo = vec_mul(quad, vec_sld(quad, quad, 2));
+
+ return pfirst(octo);
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a)
+{
+ float redux_even = predux_mul<Packet4f>(Bf16ToF32Even(a));
+ float redux_odd = predux_mul<Packet4f>(Bf16ToF32Odd(a));
+ float f32_result = redux_even * redux_odd;
+ return bfloat16(f32_result);
+}
+
+
+template<> EIGEN_STRONG_INLINE signed char predux_mul<Packet16c>(const Packet16c& a)
+{
+ Packet16c pair, quad, octo, result;
+
+ pair = vec_mul(a, vec_sld(a, a, 8));
+ quad = vec_mul(pair, vec_sld(pair, pair, 4));
+ octo = vec_mul(quad, vec_sld(quad, quad, 2));
+ result = vec_mul(octo, vec_sld(octo, octo, 1));
+
+ return pfirst(result);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned char predux_mul<Packet16uc>(const Packet16uc& a)
+{
+ Packet16uc pair, quad, octo, result;
+
+ pair = vec_mul(a, vec_sld(a, a, 8));
+ quad = vec_mul(pair, vec_sld(pair, pair, 4));
+ octo = vec_mul(quad, vec_sld(quad, quad, 2));
+ result = vec_mul(octo, vec_sld(octo, octo, 1));
+
+ return pfirst(result);
+}
+
// min
-template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+template<typename Packet> EIGEN_STRONG_INLINE
+__UNPACK_TYPE__(Packet) predux_min4(const Packet& a)
{
- Packet4f b, res;
+ Packet b, res;
b = vec_min(a, vec_sld(a, a, 8));
res = vec_min(b, vec_sld(b, b, 4));
return pfirst(res);
}
+
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ return predux_min4<Packet4f>(a);
+}
+
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
- Packet4i b, res;
- b = vec_min(a, vec_sld(a, a, 8));
- res = vec_min(b, vec_sld(b, b, 4));
- return pfirst(res);
+ return predux_min4<Packet4i>(a);
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a)
+{
+ float redux_even = predux_min<Packet4f>(Bf16ToF32Even(a));
+ float redux_odd = predux_min<Packet4f>(Bf16ToF32Odd(a));
+ float f32_result = (std::min)(redux_even, redux_odd);
+ return bfloat16(f32_result);
}
+template<> EIGEN_STRONG_INLINE short int predux_min<Packet8s>(const Packet8s& a)
+{
+ Packet8s pair, quad, octo;
+
+ //pair = { Min(a0,a4), Min(a1,a5), Min(a2,a6), Min(a3,a7) }
+ pair = vec_min(a, vec_sld(a, a, 8));
+
+ //quad = { Min(a0, a4, a2, a6), Min(a1, a5, a3, a7) }
+ quad = vec_min(pair, vec_sld(pair, pair, 4));
+
+ //octo = { Min(a0, a4, a2, a6, a1, a5, a3, a7) }
+ octo = vec_min(quad, vec_sld(quad, quad, 2));
+ return pfirst(octo);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned short int predux_min<Packet8us>(const Packet8us& a)
+{
+ Packet8us pair, quad, octo;
+
+ //pair = { Min(a0,a4), Min(a1,a5), Min(a2,a6), Min(a3,a7) }
+ pair = vec_min(a, vec_sld(a, a, 8));
+
+ //quad = { Min(a0, a4, a2, a6), Min(a1, a5, a3, a7) }
+ quad = vec_min(pair, vec_sld(pair, pair, 4));
+
+ //octo = { Min(a0, a4, a2, a6, a1, a5, a3, a7) }
+ octo = vec_min(quad, vec_sld(quad, quad, 2));
+ return pfirst(octo);
+}
+
+template<> EIGEN_STRONG_INLINE signed char predux_min<Packet16c>(const Packet16c& a)
+{
+ Packet16c pair, quad, octo, result;
+
+ pair = vec_min(a, vec_sld(a, a, 8));
+ quad = vec_min(pair, vec_sld(pair, pair, 4));
+ octo = vec_min(quad, vec_sld(quad, quad, 2));
+ result = vec_min(octo, vec_sld(octo, octo, 1));
+
+ return pfirst(result);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned char predux_min<Packet16uc>(const Packet16uc& a)
+{
+ Packet16uc pair, quad, octo, result;
+
+ pair = vec_min(a, vec_sld(a, a, 8));
+ quad = vec_min(pair, vec_sld(pair, pair, 4));
+ octo = vec_min(quad, vec_sld(quad, quad, 2));
+ result = vec_min(octo, vec_sld(octo, octo, 1));
+
+ return pfirst(result);
+}
// max
-template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+template<typename Packet> EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) predux_max4(const Packet& a)
{
- Packet4f b, res;
+ Packet b, res;
b = vec_max(a, vec_sld(a, a, 8));
res = vec_max(b, vec_sld(b, b, 4));
return pfirst(res);
}
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ return predux_max4<Packet4f>(a);
+}
+
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
- Packet4i b, res;
- b = vec_max(a, vec_sld(a, a, 8));
- res = vec_max(b, vec_sld(b, b, 4));
- return pfirst(res);
+ return predux_max4<Packet4i>(a);
}
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
+template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a)
{
- static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
- {
-#ifdef _BIG_ENDIAN
- switch (Offset % 4) {
- case 1:
- first = vec_sld(first, second, 4); break;
- case 2:
- first = vec_sld(first, second, 8); break;
- case 3:
- first = vec_sld(first, second, 12); break;
- }
-#else
- switch (Offset % 4) {
- case 1:
- first = vec_sld(second, first, 12); break;
- case 2:
- first = vec_sld(second, first, 8); break;
- case 3:
- first = vec_sld(second, first, 4); break;
- }
-#endif
- }
-};
+ float redux_even = predux_max<Packet4f>(Bf16ToF32Even(a));
+ float redux_odd = predux_max<Packet4f>(Bf16ToF32Odd(a));
+ float f32_result = (std::max)(redux_even, redux_odd);
+ return bfloat16(f32_result);
+}
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
+template<> EIGEN_STRONG_INLINE short int predux_max<Packet8s>(const Packet8s& a)
{
- static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
- {
-#ifdef _BIG_ENDIAN
- switch (Offset % 4) {
- case 1:
- first = vec_sld(first, second, 4); break;
- case 2:
- first = vec_sld(first, second, 8); break;
- case 3:
- first = vec_sld(first, second, 12); break;
- }
-#else
- switch (Offset % 4) {
- case 1:
- first = vec_sld(second, first, 12); break;
- case 2:
- first = vec_sld(second, first, 8); break;
- case 3:
- first = vec_sld(second, first, 4); break;
- }
-#endif
- }
-};
+ Packet8s pair, quad, octo;
+
+ //pair = { Max(a0,a4), Max(a1,a5), Max(a2,a6), Max(a3,a7) }
+ pair = vec_max(a, vec_sld(a, a, 8));
+
+ //quad = { Max(a0, a4, a2, a6), Max(a1, a5, a3, a7) }
+ quad = vec_max(pair, vec_sld(pair, pair, 4));
+
+ //octo = { Max(a0, a4, a2, a6, a1, a5, a3, a7) }
+ octo = vec_max(quad, vec_sld(quad, quad, 2));
+ return pfirst(octo);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned short int predux_max<Packet8us>(const Packet8us& a)
+{
+ Packet8us pair, quad, octo;
+
+ //pair = { Max(a0,a4), Max(a1,a5), Max(a2,a6), Max(a3,a7) }
+ pair = vec_max(a, vec_sld(a, a, 8));
+
+ //quad = { Max(a0, a4, a2, a6), Max(a1, a5, a3, a7) }
+ quad = vec_max(pair, vec_sld(pair, pair, 4));
+
+ //octo = { Max(a0, a4, a2, a6, a1, a5, a3, a7) }
+ octo = vec_max(quad, vec_sld(quad, quad, 2));
+ return pfirst(octo);
+}
+
+template<> EIGEN_STRONG_INLINE signed char predux_max<Packet16c>(const Packet16c& a)
+{
+ Packet16c pair, quad, octo, result;
+
+ pair = vec_max(a, vec_sld(a, a, 8));
+ quad = vec_max(pair, vec_sld(pair, pair, 4));
+ octo = vec_max(quad, vec_sld(quad, quad, 2));
+ result = vec_max(octo, vec_sld(octo, octo, 1));
+
+ return pfirst(result);
+}
+
+template<> EIGEN_STRONG_INLINE unsigned char predux_max<Packet16uc>(const Packet16uc& a)
+{
+ Packet16uc pair, quad, octo, result;
+
+ pair = vec_max(a, vec_sld(a, a, 8));
+ quad = vec_max(pair, vec_sld(pair, pair, 4));
+ octo = vec_max(quad, vec_sld(quad, quad, 2));
+ result = vec_max(octo, vec_sld(octo, octo, 1));
+
+ return pfirst(result);
+}
+
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
+{
+ return vec_any_ne(x, pzero(x));
+}
+
+template <typename T> EIGEN_DEVICE_FUNC inline void
+ptranspose_common(PacketBlock<T,4>& kernel){
+ T t0, t1, t2, t3;
+ t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+ t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+ t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+ t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
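+// The helper above is the standard 4x4 transpose in two interleave rounds:
+// with rows r0..r3, vec_mergeh(r0, r2) == {r0[0], r2[0], r0[1], r2[1]}, so
+// the first round interleaves row pairs and the second round finishes the
+// transpose, making column j of the input row j of the output.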
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
- Packet4f t0, t1, t2, t3;
+  ptranspose_common<Packet4f>(kernel);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4i,4>& kernel) {
+  ptranspose_common<Packet4i>(kernel);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8s,4>& kernel) {
+ Packet8s t0, t1, t2, t3;
t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
@@ -726,8 +1804,8 @@ ptranspose(PacketBlock<Packet4f,4>& kernel) {
}
EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet4i,4>& kernel) {
- Packet4i t0, t1, t2, t3;
+ptranspose(PacketBlock<Packet8us,4>& kernel) {
+ Packet8us t0, t1, t2, t3;
t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
@@ -738,18 +1816,440 @@ ptranspose(PacketBlock<Packet4i,4>& kernel) {
kernel.packet[3] = vec_mergel(t1, t3);
}
-template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8bf,4>& kernel) {
+ Packet8us t0, t1, t2, t3;
+
+ t0 = vec_mergeh(kernel.packet[0].m_val, kernel.packet[2].m_val);
+ t1 = vec_mergel(kernel.packet[0].m_val, kernel.packet[2].m_val);
+ t2 = vec_mergeh(kernel.packet[1].m_val, kernel.packet[3].m_val);
+ t3 = vec_mergel(kernel.packet[1].m_val, kernel.packet[3].m_val);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16c,4>& kernel) {
+ Packet16c t0, t1, t2, t3;
+ t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+ t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+ t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+ t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
+
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16uc,4>& kernel) {
+ Packet16uc t0, t1, t2, t3;
+ t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+ t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+ t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+ t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8s,8>& kernel) {
+ Packet8s v[8], sum[8];
+
+ v[0] = vec_mergeh(kernel.packet[0], kernel.packet[4]);
+ v[1] = vec_mergel(kernel.packet[0], kernel.packet[4]);
+ v[2] = vec_mergeh(kernel.packet[1], kernel.packet[5]);
+ v[3] = vec_mergel(kernel.packet[1], kernel.packet[5]);
+ v[4] = vec_mergeh(kernel.packet[2], kernel.packet[6]);
+ v[5] = vec_mergel(kernel.packet[2], kernel.packet[6]);
+ v[6] = vec_mergeh(kernel.packet[3], kernel.packet[7]);
+ v[7] = vec_mergel(kernel.packet[3], kernel.packet[7]);
+ sum[0] = vec_mergeh(v[0], v[4]);
+ sum[1] = vec_mergel(v[0], v[4]);
+ sum[2] = vec_mergeh(v[1], v[5]);
+ sum[3] = vec_mergel(v[1], v[5]);
+ sum[4] = vec_mergeh(v[2], v[6]);
+ sum[5] = vec_mergel(v[2], v[6]);
+ sum[6] = vec_mergeh(v[3], v[7]);
+ sum[7] = vec_mergel(v[3], v[7]);
+
+ kernel.packet[0] = vec_mergeh(sum[0], sum[4]);
+ kernel.packet[1] = vec_mergel(sum[0], sum[4]);
+ kernel.packet[2] = vec_mergeh(sum[1], sum[5]);
+ kernel.packet[3] = vec_mergel(sum[1], sum[5]);
+ kernel.packet[4] = vec_mergeh(sum[2], sum[6]);
+ kernel.packet[5] = vec_mergel(sum[2], sum[6]);
+ kernel.packet[6] = vec_mergeh(sum[3], sum[7]);
+ kernel.packet[7] = vec_mergel(sum[3], sum[7]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8us,8>& kernel) {
+ Packet8us v[8], sum[8];
+
+ v[0] = vec_mergeh(kernel.packet[0], kernel.packet[4]);
+ v[1] = vec_mergel(kernel.packet[0], kernel.packet[4]);
+ v[2] = vec_mergeh(kernel.packet[1], kernel.packet[5]);
+ v[3] = vec_mergel(kernel.packet[1], kernel.packet[5]);
+ v[4] = vec_mergeh(kernel.packet[2], kernel.packet[6]);
+ v[5] = vec_mergel(kernel.packet[2], kernel.packet[6]);
+ v[6] = vec_mergeh(kernel.packet[3], kernel.packet[7]);
+ v[7] = vec_mergel(kernel.packet[3], kernel.packet[7]);
+ sum[0] = vec_mergeh(v[0], v[4]);
+ sum[1] = vec_mergel(v[0], v[4]);
+ sum[2] = vec_mergeh(v[1], v[5]);
+ sum[3] = vec_mergel(v[1], v[5]);
+ sum[4] = vec_mergeh(v[2], v[6]);
+ sum[5] = vec_mergel(v[2], v[6]);
+ sum[6] = vec_mergeh(v[3], v[7]);
+ sum[7] = vec_mergel(v[3], v[7]);
+
+ kernel.packet[0] = vec_mergeh(sum[0], sum[4]);
+ kernel.packet[1] = vec_mergel(sum[0], sum[4]);
+ kernel.packet[2] = vec_mergeh(sum[1], sum[5]);
+ kernel.packet[3] = vec_mergel(sum[1], sum[5]);
+ kernel.packet[4] = vec_mergeh(sum[2], sum[6]);
+ kernel.packet[5] = vec_mergel(sum[2], sum[6]);
+ kernel.packet[6] = vec_mergeh(sum[3], sum[7]);
+ kernel.packet[7] = vec_mergel(sum[3], sum[7]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet8bf,8>& kernel) {
+ Packet8bf v[8], sum[8];
+
+ v[0] = vec_mergeh(kernel.packet[0].m_val, kernel.packet[4].m_val);
+ v[1] = vec_mergel(kernel.packet[0].m_val, kernel.packet[4].m_val);
+ v[2] = vec_mergeh(kernel.packet[1].m_val, kernel.packet[5].m_val);
+ v[3] = vec_mergel(kernel.packet[1].m_val, kernel.packet[5].m_val);
+ v[4] = vec_mergeh(kernel.packet[2].m_val, kernel.packet[6].m_val);
+ v[5] = vec_mergel(kernel.packet[2].m_val, kernel.packet[6].m_val);
+ v[6] = vec_mergeh(kernel.packet[3].m_val, kernel.packet[7].m_val);
+ v[7] = vec_mergel(kernel.packet[3].m_val, kernel.packet[7].m_val);
+ sum[0] = vec_mergeh(v[0].m_val, v[4].m_val);
+ sum[1] = vec_mergel(v[0].m_val, v[4].m_val);
+ sum[2] = vec_mergeh(v[1].m_val, v[5].m_val);
+ sum[3] = vec_mergel(v[1].m_val, v[5].m_val);
+ sum[4] = vec_mergeh(v[2].m_val, v[6].m_val);
+ sum[5] = vec_mergel(v[2].m_val, v[6].m_val);
+ sum[6] = vec_mergeh(v[3].m_val, v[7].m_val);
+ sum[7] = vec_mergel(v[3].m_val, v[7].m_val);
+
+ kernel.packet[0] = vec_mergeh(sum[0].m_val, sum[4].m_val);
+ kernel.packet[1] = vec_mergel(sum[0].m_val, sum[4].m_val);
+ kernel.packet[2] = vec_mergeh(sum[1].m_val, sum[5].m_val);
+ kernel.packet[3] = vec_mergel(sum[1].m_val, sum[5].m_val);
+ kernel.packet[4] = vec_mergeh(sum[2].m_val, sum[6].m_val);
+ kernel.packet[5] = vec_mergel(sum[2].m_val, sum[6].m_val);
+ kernel.packet[6] = vec_mergeh(sum[3].m_val, sum[7].m_val);
+ kernel.packet[7] = vec_mergel(sum[3].m_val, sum[7].m_val);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16c,16>& kernel) {
+ Packet16c step1[16], step2[16], step3[16];
+
+ step1[0] = vec_mergeh(kernel.packet[0], kernel.packet[8]);
+ step1[1] = vec_mergel(kernel.packet[0], kernel.packet[8]);
+ step1[2] = vec_mergeh(kernel.packet[1], kernel.packet[9]);
+ step1[3] = vec_mergel(kernel.packet[1], kernel.packet[9]);
+ step1[4] = vec_mergeh(kernel.packet[2], kernel.packet[10]);
+ step1[5] = vec_mergel(kernel.packet[2], kernel.packet[10]);
+ step1[6] = vec_mergeh(kernel.packet[3], kernel.packet[11]);
+ step1[7] = vec_mergel(kernel.packet[3], kernel.packet[11]);
+ step1[8] = vec_mergeh(kernel.packet[4], kernel.packet[12]);
+ step1[9] = vec_mergel(kernel.packet[4], kernel.packet[12]);
+ step1[10] = vec_mergeh(kernel.packet[5], kernel.packet[13]);
+ step1[11] = vec_mergel(kernel.packet[5], kernel.packet[13]);
+ step1[12] = vec_mergeh(kernel.packet[6], kernel.packet[14]);
+ step1[13] = vec_mergel(kernel.packet[6], kernel.packet[14]);
+ step1[14] = vec_mergeh(kernel.packet[7], kernel.packet[15]);
+ step1[15] = vec_mergel(kernel.packet[7], kernel.packet[15]);
+
+ step2[0] = vec_mergeh(step1[0], step1[8]);
+ step2[1] = vec_mergel(step1[0], step1[8]);
+ step2[2] = vec_mergeh(step1[1], step1[9]);
+ step2[3] = vec_mergel(step1[1], step1[9]);
+ step2[4] = vec_mergeh(step1[2], step1[10]);
+ step2[5] = vec_mergel(step1[2], step1[10]);
+ step2[6] = vec_mergeh(step1[3], step1[11]);
+ step2[7] = vec_mergel(step1[3], step1[11]);
+ step2[8] = vec_mergeh(step1[4], step1[12]);
+ step2[9] = vec_mergel(step1[4], step1[12]);
+ step2[10] = vec_mergeh(step1[5], step1[13]);
+ step2[11] = vec_mergel(step1[5], step1[13]);
+ step2[12] = vec_mergeh(step1[6], step1[14]);
+ step2[13] = vec_mergel(step1[6], step1[14]);
+ step2[14] = vec_mergeh(step1[7], step1[15]);
+ step2[15] = vec_mergel(step1[7], step1[15]);
+
+ step3[0] = vec_mergeh(step2[0], step2[8]);
+ step3[1] = vec_mergel(step2[0], step2[8]);
+ step3[2] = vec_mergeh(step2[1], step2[9]);
+ step3[3] = vec_mergel(step2[1], step2[9]);
+ step3[4] = vec_mergeh(step2[2], step2[10]);
+ step3[5] = vec_mergel(step2[2], step2[10]);
+ step3[6] = vec_mergeh(step2[3], step2[11]);
+ step3[7] = vec_mergel(step2[3], step2[11]);
+ step3[8] = vec_mergeh(step2[4], step2[12]);
+ step3[9] = vec_mergel(step2[4], step2[12]);
+ step3[10] = vec_mergeh(step2[5], step2[13]);
+ step3[11] = vec_mergel(step2[5], step2[13]);
+ step3[12] = vec_mergeh(step2[6], step2[14]);
+ step3[13] = vec_mergel(step2[6], step2[14]);
+ step3[14] = vec_mergeh(step2[7], step2[15]);
+ step3[15] = vec_mergel(step2[7], step2[15]);
+
+ kernel.packet[0] = vec_mergeh(step3[0], step3[8]);
+ kernel.packet[1] = vec_mergel(step3[0], step3[8]);
+ kernel.packet[2] = vec_mergeh(step3[1], step3[9]);
+ kernel.packet[3] = vec_mergel(step3[1], step3[9]);
+ kernel.packet[4] = vec_mergeh(step3[2], step3[10]);
+ kernel.packet[5] = vec_mergel(step3[2], step3[10]);
+ kernel.packet[6] = vec_mergeh(step3[3], step3[11]);
+ kernel.packet[7] = vec_mergel(step3[3], step3[11]);
+ kernel.packet[8] = vec_mergeh(step3[4], step3[12]);
+ kernel.packet[9] = vec_mergel(step3[4], step3[12]);
+ kernel.packet[10] = vec_mergeh(step3[5], step3[13]);
+ kernel.packet[11] = vec_mergel(step3[5], step3[13]);
+ kernel.packet[12] = vec_mergeh(step3[6], step3[14]);
+ kernel.packet[13] = vec_mergel(step3[6], step3[14]);
+ kernel.packet[14] = vec_mergeh(step3[7], step3[15]);
+ kernel.packet[15] = vec_mergel(step3[7], step3[15]);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16uc,16>& kernel) {
+ Packet16uc step1[16], step2[16], step3[16];
+
+ step1[0] = vec_mergeh(kernel.packet[0], kernel.packet[8]);
+ step1[1] = vec_mergel(kernel.packet[0], kernel.packet[8]);
+ step1[2] = vec_mergeh(kernel.packet[1], kernel.packet[9]);
+ step1[3] = vec_mergel(kernel.packet[1], kernel.packet[9]);
+ step1[4] = vec_mergeh(kernel.packet[2], kernel.packet[10]);
+ step1[5] = vec_mergel(kernel.packet[2], kernel.packet[10]);
+ step1[6] = vec_mergeh(kernel.packet[3], kernel.packet[11]);
+ step1[7] = vec_mergel(kernel.packet[3], kernel.packet[11]);
+ step1[8] = vec_mergeh(kernel.packet[4], kernel.packet[12]);
+ step1[9] = vec_mergel(kernel.packet[4], kernel.packet[12]);
+ step1[10] = vec_mergeh(kernel.packet[5], kernel.packet[13]);
+ step1[11] = vec_mergel(kernel.packet[5], kernel.packet[13]);
+ step1[12] = vec_mergeh(kernel.packet[6], kernel.packet[14]);
+ step1[13] = vec_mergel(kernel.packet[6], kernel.packet[14]);
+ step1[14] = vec_mergeh(kernel.packet[7], kernel.packet[15]);
+ step1[15] = vec_mergel(kernel.packet[7], kernel.packet[15]);
+
+ step2[0] = vec_mergeh(step1[0], step1[8]);
+ step2[1] = vec_mergel(step1[0], step1[8]);
+ step2[2] = vec_mergeh(step1[1], step1[9]);
+ step2[3] = vec_mergel(step1[1], step1[9]);
+ step2[4] = vec_mergeh(step1[2], step1[10]);
+ step2[5] = vec_mergel(step1[2], step1[10]);
+ step2[6] = vec_mergeh(step1[3], step1[11]);
+ step2[7] = vec_mergel(step1[3], step1[11]);
+ step2[8] = vec_mergeh(step1[4], step1[12]);
+ step2[9] = vec_mergel(step1[4], step1[12]);
+ step2[10] = vec_mergeh(step1[5], step1[13]);
+ step2[11] = vec_mergel(step1[5], step1[13]);
+ step2[12] = vec_mergeh(step1[6], step1[14]);
+ step2[13] = vec_mergel(step1[6], step1[14]);
+ step2[14] = vec_mergeh(step1[7], step1[15]);
+ step2[15] = vec_mergel(step1[7], step1[15]);
+
+ step3[0] = vec_mergeh(step2[0], step2[8]);
+ step3[1] = vec_mergel(step2[0], step2[8]);
+ step3[2] = vec_mergeh(step2[1], step2[9]);
+ step3[3] = vec_mergel(step2[1], step2[9]);
+ step3[4] = vec_mergeh(step2[2], step2[10]);
+ step3[5] = vec_mergel(step2[2], step2[10]);
+ step3[6] = vec_mergeh(step2[3], step2[11]);
+ step3[7] = vec_mergel(step2[3], step2[11]);
+ step3[8] = vec_mergeh(step2[4], step2[12]);
+ step3[9] = vec_mergel(step2[4], step2[12]);
+ step3[10] = vec_mergeh(step2[5], step2[13]);
+ step3[11] = vec_mergel(step2[5], step2[13]);
+ step3[12] = vec_mergeh(step2[6], step2[14]);
+ step3[13] = vec_mergel(step2[6], step2[14]);
+ step3[14] = vec_mergeh(step2[7], step2[15]);
+ step3[15] = vec_mergel(step2[7], step2[15]);
+
+ kernel.packet[0] = vec_mergeh(step3[0], step3[8]);
+ kernel.packet[1] = vec_mergel(step3[0], step3[8]);
+ kernel.packet[2] = vec_mergeh(step3[1], step3[9]);
+ kernel.packet[3] = vec_mergel(step3[1], step3[9]);
+ kernel.packet[4] = vec_mergeh(step3[2], step3[10]);
+ kernel.packet[5] = vec_mergel(step3[2], step3[10]);
+ kernel.packet[6] = vec_mergeh(step3[3], step3[11]);
+ kernel.packet[7] = vec_mergel(step3[3], step3[11]);
+ kernel.packet[8] = vec_mergeh(step3[4], step3[12]);
+ kernel.packet[9] = vec_mergel(step3[4], step3[12]);
+ kernel.packet[10] = vec_mergeh(step3[5], step3[13]);
+ kernel.packet[11] = vec_mergel(step3[5], step3[13]);
+ kernel.packet[12] = vec_mergeh(step3[6], step3[14]);
+ kernel.packet[13] = vec_mergel(step3[6], step3[14]);
+ kernel.packet[14] = vec_mergeh(step3[7], step3[15]);
+ kernel.packet[15] = vec_mergel(step3[7], step3[15]);
+}
+
+template<typename Packet> EIGEN_STRONG_INLINE
+Packet pblend4(const Selector<4>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) {
Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
Packet4ui mask = reinterpret_cast<Packet4ui>(vec_cmpeq(reinterpret_cast<Packet4ui>(select), reinterpret_cast<Packet4ui>(p4i_ONE)));
return vec_sel(elsePacket, thenPacket, mask);
}
+template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
+ return pblend4<Packet4i>(ifPacket, thenPacket, elsePacket);
+}
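+// pblend selects thenPacket in lanes whose selector entry is 1 and
+// elsePacket elsewhere; e.g. a Selector<4> of {1, 0, 0, 1} yields
+// {then[0], else[1], else[2], then[3]}.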
+
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
- Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
- Packet4ui mask = reinterpret_cast<Packet4ui>(vec_cmpeq(reinterpret_cast<Packet4ui>(select), reinterpret_cast<Packet4ui>(p4i_ONE)));
+ return pblend4<Packet4f>(ifPacket, thenPacket, elsePacket);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8s pblend(const Selector<8>& ifPacket, const Packet8s& thenPacket, const Packet8s& elsePacket) {
+ Packet8us select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3],
+ ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7] };
+ Packet8us mask = reinterpret_cast<Packet8us>(vec_cmpeq(select, p8us_ONE));
+ Packet8s result = vec_sel(elsePacket, thenPacket, mask);
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8us pblend(const Selector<8>& ifPacket, const Packet8us& thenPacket, const Packet8us& elsePacket) {
+ Packet8us select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3],
+ ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7] };
+ Packet8us mask = reinterpret_cast<Packet8us>(vec_cmpeq(reinterpret_cast<Packet8us>(select), p8us_ONE));
+ return vec_sel(elsePacket, thenPacket, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8bf pblend(const Selector<8>& ifPacket, const Packet8bf& thenPacket, const Packet8bf& elsePacket) {
+ return pblend<Packet8us>(ifPacket, thenPacket, elsePacket);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16c pblend(const Selector<16>& ifPacket, const Packet16c& thenPacket, const Packet16c& elsePacket) {
+ Packet16uc select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3],
+ ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7],
+ ifPacket.select[8], ifPacket.select[9], ifPacket.select[10], ifPacket.select[11],
+ ifPacket.select[12], ifPacket.select[13], ifPacket.select[14], ifPacket.select[15] };
+
+ Packet16uc mask = reinterpret_cast<Packet16uc>(vec_cmpeq(reinterpret_cast<Packet16uc>(select), p16uc_ONE));
+ return vec_sel(elsePacket, thenPacket, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet16uc pblend(const Selector<16>& ifPacket, const Packet16uc& thenPacket, const Packet16uc& elsePacket) {
+ Packet16uc select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3],
+ ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7],
+ ifPacket.select[8], ifPacket.select[9], ifPacket.select[10], ifPacket.select[11],
+ ifPacket.select[12], ifPacket.select[13], ifPacket.select[14], ifPacket.select[15] };
+
+ Packet16uc mask = reinterpret_cast<Packet16uc>(vec_cmpeq(reinterpret_cast<Packet16uc>(select), p16uc_ONE));
return vec_sel(elsePacket, thenPacket, mask);
}
+template <>
+struct type_casting_traits<float, int> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<int, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<bfloat16, unsigned short int> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template <>
+struct type_casting_traits<unsigned short int, bfloat16> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return vec_cts(a,0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4ui pcast<Packet4f, Packet4ui>(const Packet4f& a) {
+ return vec_ctu(a,0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
+ return vec_ctf(a,0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4ui, Packet4f>(const Packet4ui& a) {
+ return vec_ctf(a,0);
+}
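+// vec_cts/vec_ctu truncate toward zero (C cast semantics); e.g.
+// pcast<Packet4f, Packet4i>({1.7f, -2.9f, 0.5f, 3.f}) == {1, -2, 0, 3}.
+// vec_ctf performs the reverse int-to-float conversion.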
+
+template<> EIGEN_STRONG_INLINE Packet8us pcast<Packet8bf, Packet8us>(const Packet8bf& a) {
+ Packet4f float_even = Bf16ToF32Even(a);
+ Packet4f float_odd = Bf16ToF32Odd(a);
+ Packet4ui int_even = pcast<Packet4f, Packet4ui>(float_even);
+ Packet4ui int_odd = pcast<Packet4f, Packet4ui>(float_odd);
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(low_mask, 0x0000FFFF);
+ Packet4ui low_even = pand<Packet4ui>(int_even, p4ui_low_mask);
+ Packet4ui low_odd = pand<Packet4ui>(int_odd, p4ui_low_mask);
+
+  // Saturate values bigger than USHRT_MAX (0xFFFF) instead of wrapping
+  Packet4bi overflow_selector;
+  if(vec_any_gt(int_even, p4ui_low_mask)){
+    overflow_selector = vec_cmpgt(int_even, p4ui_low_mask);
+    low_even = vec_sel(low_even, p4ui_low_mask, overflow_selector);
+  }
+  if(vec_any_gt(int_odd, p4ui_low_mask)){
+    overflow_selector = vec_cmpgt(int_odd, p4ui_low_mask);
+    low_odd = vec_sel(low_odd, p4ui_low_mask, overflow_selector);
+  }
+
+ low_odd = plogical_shift_left<16>(low_odd);
+
+ Packet4ui int_final = por<Packet4ui>(low_even, low_odd);
+ return reinterpret_cast<Packet8us>(int_final);
+}
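+// Lanes that do not fit in 16 bits saturate rather than wrap: any value that
+// converts above 0xFFFF (USHRT_MAX) is clamped to 0xFFFF before the even and
+// odd halves are re-interleaved into the Packet8us result.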
+
+template<> EIGEN_STRONG_INLINE Packet8bf pcast<Packet8us, Packet8bf>(const Packet8us& a) {
+ //short -> int -> float -> bfloat16
+ const _EIGEN_DECLARE_CONST_FAST_Packet4ui(low_mask, 0x0000FFFF);
+ Packet4ui int_cast = reinterpret_cast<Packet4ui>(a);
+ Packet4ui int_even = pand<Packet4ui>(int_cast, p4ui_low_mask);
+ Packet4ui int_odd = plogical_shift_right<16>(int_cast);
+ Packet4f float_even = pcast<Packet4ui, Packet4f>(int_even);
+ Packet4f float_odd = pcast<Packet4ui, Packet4f>(int_odd);
+ return F32ToBf16(float_even, float_odd);
+}
+
+
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
+ return reinterpret_cast<Packet4i>(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
+ return reinterpret_cast<Packet4f>(a);
+}
+
+
//---------- double ----------
#ifdef __VSX__
@@ -764,9 +2264,12 @@ typedef __vector __bool long Packet2bl;
static Packet2l p2l_ONE = { 1, 1 };
static Packet2l p2l_ZERO = reinterpret_cast<Packet2l>(p4i_ZERO);
-static Packet2d p2d_ONE = { 1.0, 1.0 };
+static Packet2ul p2ul_SIGN = { 0x8000000000000000ull, 0x8000000000000000ull };
+static Packet2ul p2ul_PREV0DOT5 = { 0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull };
+static Packet2d p2d_ONE = { 1.0, 1.0 };
static Packet2d p2d_ZERO = reinterpret_cast<Packet2d>(p4f_ZERO);
-static Packet2d p2d_MZERO = { -0.0, -0.0 };
+static Packet2d p2d_MZERO = { numext::bit_cast<double>(0x8000000000000000ull),
+ numext::bit_cast<double>(0x8000000000000000ull) };
#ifdef _BIG_ENDIAN
static Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(p2d_ZERO), reinterpret_cast<Packet4f>(p2d_ONE), 8));
@@ -774,16 +2277,9 @@ static Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_c
static Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(p2d_ONE), reinterpret_cast<Packet4f>(p2d_ZERO), 8));
#endif
-template<int index> Packet2d vec_splat_dbl(Packet2d& a);
-
-template<> EIGEN_STRONG_INLINE Packet2d vec_splat_dbl<0>(Packet2d& a)
-{
- return reinterpret_cast<Packet2d>(vec_perm(a, a, p16uc_PSET64_HI));
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d vec_splat_dbl<1>(Packet2d& a)
+template<int index> Packet2d vec_splat_dbl(Packet2d& a)
{
- return reinterpret_cast<Packet2d>(vec_perm(a, a, p16uc_PSET64_LO));
+ return vec_splat(a, index);
}
template<> struct packet_traits<double> : default_packet_traits
@@ -812,12 +2308,13 @@ template<> struct packet_traits<double> : default_packet_traits
HasRound = 1,
HasFloor = 1,
HasCeil = 1,
+ HasRint = 1,
HasNegate = 1,
HasBlend = 1
};
};
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2d half; };
inline std::ostream & operator <<(std::ostream & s, const Packet2l & v)
{
@@ -845,21 +2342,13 @@ inline std::ostream & operator <<(std::ostream & s, const Packet2d & v)
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
{
EIGEN_DEBUG_ALIGNED_LOAD
-#ifdef __VSX__
- return vec_vsx_ld(0, from);
-#else
- return vec_ld(0, from);
-#endif
+ return vec_xl(0, const_cast<double *>(from)); // cast needed by Clang
}
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
{
EIGEN_DEBUG_ALIGNED_STORE
-#ifdef __VSX__
- vec_vsx_st(from, 0, to);
-#else
- vec_st(from, 0, to);
-#endif
+ vec_xst(from, 0, to);
}
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
@@ -867,28 +2356,32 @@ template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
return v;
}
+template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(unsigned long from) {
+ Packet2l v = {static_cast<long long>(from), static_cast<long long>(from)};
+ return reinterpret_cast<Packet2d>(v);
+}
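+// pset1frombits broadcasts a raw bit pattern into both lanes; e.g.
+// pset1frombits<Packet2d>(0x3FF0000000000000ull) == {1.0, 1.0}.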
+
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
- a1 = pload<Packet2d>(a);
- a0 = vec_splat_dbl<0>(a1);
- a1 = vec_splat_dbl<1>(a1);
- a3 = pload<Packet2d>(a+2);
- a2 = vec_splat_dbl<0>(a3);
- a3 = vec_splat_dbl<1>(a3);
+  // This way is faster than vec_splat (at least for doubles on POWER9)
+ a0 = pset1<Packet2d>(a[0]);
+ a1 = pset1<Packet2d>(a[1]);
+ a2 = pset1<Packet2d>(a[2]);
+ a3 = pset1<Packet2d>(a[3]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
- double EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 double af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet2d>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
- double EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 double af[2];
pstore<double>(af, from);
to[0*stride] = af[0];
to[1*stride] = af[1];
@@ -910,9 +2403,29 @@ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const
// For some weird reason, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
-template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b)
+{
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
+ Packet2d ret;
+ __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+ }
-template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b)
+{
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
+ Packet2d ret;
+ __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+ return ret;
+}
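+// Both sequences above build a lane mask with a VSX compare (a >= b for
+// pmin, b > a for pmax) and use xxsel to pick b where the mask is set and a
+// elsewhere. The compare is false whenever an operand is NaN, so the first
+// argument is propagated in that case, matching std::min/std::max lane-wise.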
+
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return reinterpret_cast<Packet2d>(vec_cmple(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return reinterpret_cast<Packet2d>(vec_cmplt(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return reinterpret_cast<Packet2d>(vec_cmpeq(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) {
+ Packet2d c = reinterpret_cast<Packet2d>(vec_cmpge(a,b));
+ return vec_nor(c,c);
+}
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
@@ -922,14 +2435,34 @@ template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }
-template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a)
+{
+ Packet2d t = vec_add(reinterpret_cast<Packet2d>(vec_or(vec_and(reinterpret_cast<Packet2ul>(a), p2ul_SIGN), p2ul_PREV0DOT5)), a);
+ Packet2d res;
+
+ __asm__("xvrdpiz %x0, %x1\n\t"
+ : "=&wa" (res)
+ : "wa" (t));
+
+ return res;
+}
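+// pround above rounds half away from zero: add the largest double below 0.5,
+// carrying the sign of a, then truncate toward zero with xvrdpiz. The
+// one-ulp-short bias keeps pround(0.49999999999999994) == 0.0 correct, while
+// 2.5 plus the bias still rounds up to exactly 3.0 in the addition, so
+// pround(2.5) == 3.0.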
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return vec_ceil(a); }
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }
+template<> EIGEN_STRONG_INLINE Packet2d print<Packet2d>(const Packet2d& a)
+{
+ Packet2d res;
+
+ __asm__("xvrdpic %x0, %x1\n\t"
+ : "=&wa" (res)
+ : "wa" (a));
+
+ return res;
+}
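+// xvrdpic rounds to an integral value using the current rounding mode
+// (round-to-nearest, ties-to-even by default), e.g. print(2.5) == 2.0 and
+// print(3.5) == 4.0, which is exactly the rint-style semantics print needs.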
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
- EIGEN_DEBUG_ALIGNED_LOAD
- return (Packet2d) vec_vsx_ld((long)from & 15, (const double*) _EIGEN_ALIGNED_PTR(from));
+ EIGEN_DEBUG_UNALIGNED_LOAD
+ return vec_xl(0, const_cast<double*>(from));
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
@@ -942,13 +2475,13 @@ template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from)
{
- EIGEN_DEBUG_ALIGNED_STORE
- vec_vsx_st((Packet4f)from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));
+ EIGEN_DEBUG_UNALIGNED_STORE
+ vec_xst(from, 0, to);
}
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_PPC_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore<double>(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { EIGEN_ALIGN16 double x[2]; pstore<double>(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{
@@ -956,6 +2489,177 @@ template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vec_abs(a); }
+// VSX support varies between different compilers and even different
+// versions of the same compiler. The vec_cts intrinsic efficiently
+// converts Packet2d to Packet2l, but its 64-bit double variant was buggy
+// before gcc 5.4 (resp. 6.1.1), see
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963, so a slow
+// memcpy-based fallback is used with older compilers.
+template<>
+inline Packet2l pcast<Packet2d, Packet2l>(const Packet2d& x) {
+#if EIGEN_GNUC_AT_LEAST(5, 4) || \
+ (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1)
+ return vec_cts(x, 0); // TODO: check clang version.
+#else
+ double tmp[2];
+ memcpy(tmp, &x, sizeof(tmp));
+ Packet2l l = { static_cast<long long>(tmp[0]),
+ static_cast<long long>(tmp[1]) };
+ return l;
+#endif
+}
+
+template<>
+inline Packet2d pcast<Packet2l, Packet2d>(const Packet2l& x) {
+ unsigned long long tmp[2];
+ memcpy(tmp, &x, sizeof(tmp));
+ Packet2d d = { static_cast<double>(tmp[0]),
+ static_cast<double>(tmp[1]) };
+ return d;
+}
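Both branches of the double-to-long-long path truncate toward zero: vec_cts lowers to the VSX convert-with-round-toward-zero instruction, and static_cast<long long> from double is defined to truncate as well, so the fallback is a faithful (if slow) replacement. A quick host-side check of that semantics (editor's illustration):

#include <cassert>

int main() {
  assert(static_cast<long long>(-1.7) == -1);  // truncation toward zero
  assert(static_cast<long long>( 2.9) ==  2);
  return 0;
}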
+
+
+// Packet2l shifts.
+// For POWER8 we simply use vec_sr/l.
+//
+// Things are more complicated for POWER7. There is actually a
+// vec_xxsxdi intrinsic but it is not supported by some gcc versions.
+// So we need to shift by N % 32 and rearrange the bytes.
+#ifdef __POWER8_VECTOR__
+
+template<int N>
+EIGEN_STRONG_INLINE Packet2l plogical_shift_left(const Packet2l& a) {
+ const Packet2ul shift = { N, N };
+ return vec_sl(a, shift);
+}
+
+template<int N>
+EIGEN_STRONG_INLINE Packet2l plogical_shift_right(const Packet2l& a) {
+ const Packet2ul shift = { N, N };
+ return vec_sr(a, shift);
+}
+
+#else
+
+// Shifts [A, B, C, D] to [B, 0, D, 0].
+// Used to implement left shifts for Packet2l.
+EIGEN_ALWAYS_INLINE Packet4i shift_even_left(const Packet4i& a) {
+ static const Packet16uc perm = {
+ 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03,
+ 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b };
+ #ifdef _BIG_ENDIAN
+ return vec_perm(p4i_ZERO, a, perm);
+ #else
+ return vec_perm(a, p4i_ZERO, perm);
+ #endif
+}
+
+// Shifts [A, B, C, D] to [0, A, 0, C].
+// Used to implement right shifts for Packet2l.
+EIGEN_ALWAYS_INLINE Packet4i shift_odd_right(const Packet4i& a) {
+ static const Packet16uc perm = {
+ 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
+ 0x0c, 0x0d, 0x0e, 0x0f, 0x18, 0x19, 0x1a, 0x1b };
+ #ifdef _BIG_ENDIAN
+ return vec_perm(p4i_ZERO, a, perm);
+ #else
+ return vec_perm(a, p4i_ZERO, perm);
+ #endif
+}
+
+template<int N, typename EnableIf = void>
+struct plogical_shift_left_impl;
+
+template<int N>
+struct plogical_shift_left_impl<N, typename enable_if<(N < 32) && (N >= 0)>::type> {
+ static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) {
+ static const unsigned n = static_cast<unsigned>(N);
+ const Packet4ui shift = {n, n, n, n};
+ const Packet4i ai = reinterpret_cast<Packet4i>(a);
+ static const unsigned m = static_cast<unsigned>(32 - N);
+ const Packet4ui shift_right = {m, m, m, m};
+ const Packet4i out_hi = vec_sl(ai, shift);
+ const Packet4i out_lo = shift_even_left(vec_sr(ai, shift_right));
+ return reinterpret_cast<Packet2l>(por<Packet4i>(out_hi, out_lo));
+ }
+};
+
+template<int N>
+struct plogical_shift_left_impl<N, typename enable_if<(N >= 32)>::type> {
+ static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) {
+ static const unsigned m = static_cast<unsigned>(N - 32);
+ const Packet4ui shift = {m, m, m, m};
+ const Packet4i ai = reinterpret_cast<Packet4i>(a);
+ return reinterpret_cast<Packet2l>(shift_even_left(vec_sl(ai, shift)));
+ }
+};
+
+template<int N>
+EIGEN_STRONG_INLINE Packet2l plogical_shift_left(const Packet2l& a) {
+ return plogical_shift_left_impl<N>::run(a);
+}
+
+template<int N, typename EnableIf = void>
+struct plogical_shift_right_impl;
+
+template<int N>
+struct plogical_shift_right_impl<N, typename enable_if<(N < 32) && (N >= 0)>::type> {
+ static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) {
+ static const unsigned n = static_cast<unsigned>(N);
+ const Packet4ui shift = {n, n, n, n};
+ const Packet4i ai = reinterpret_cast<Packet4i>(a);
+ static const unsigned m = static_cast<unsigned>(32 - N);
+ const Packet4ui shift_left = {m, m, m, m};
+ const Packet4i out_lo = vec_sr(ai, shift);
+ const Packet4i out_hi = shift_odd_right(vec_sl(ai, shift_left));
+ return reinterpret_cast<Packet2l>(por<Packet4i>(out_hi, out_lo));
+ }
+};
+
+template<int N>
+struct plogical_shift_right_impl<N, typename enable_if<(N >= 32)>::type> {
+ static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) {
+ static const unsigned m = static_cast<unsigned>(N - 32);
+ const Packet4ui shift = {m, m, m, m};
+ const Packet4i ai = reinterpret_cast<Packet4i>(a);
+ return reinterpret_cast<Packet2l>(shift_odd_right(vec_sr(ai, shift)));
+ }
+};
+
+template<int N>
+EIGEN_STRONG_INLINE Packet2l plogical_shift_right(const Packet2l& a) {
+ return plogical_shift_right_impl<N>::run(a);
+}
+#endif
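The POWER7 fallback composes a 64-bit logical shift out of 32-bit lane shifts plus a byte permute that carries the spilled bits across lane boundaries. A scalar model of the left-shift case, assuming 0 < N < 32 (editor's illustration):

#include <cstdint>

template <int N>  // assumes 0 < N < 32
uint64_t shl64_model(uint64_t x) {
  uint32_t lo = static_cast<uint32_t>(x);
  uint32_t hi = static_cast<uint32_t>(x >> 32);
  // vec_sl in the vector code: both 32-bit halves shifted left by N.
  // shift_even_left's permute: the N bits that fall off the top of the
  // low word are re-inserted at the bottom of the high word.
  uint32_t new_hi = (hi << N) | (lo >> (32 - N));
  uint32_t new_lo = lo << N;
  return (static_cast<uint64_t>(new_hi) << 32) | new_lo;
}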
+
+template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
+ // Clamp exponent to [-2099, 2099]
+ const Packet2d max_exponent = pset1<Packet2d>(2099.0);
+ const Packet2l e = pcast<Packet2d, Packet2l>(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
+
+ // Split 2^e into four factors and multiply:
+ const Packet2l bias = { 1023, 1023 };
+ Packet2l b = plogical_shift_right<2>(e); // floor(e/4)
+ Packet2d c = reinterpret_cast<Packet2d>(plogical_shift_left<52>(b + bias));
+ Packet2d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
+ b = psub(psub(psub(e, b), b), b); // e - 3b
+ c = reinterpret_cast<Packet2d>(plogical_shift_left<52>(b + bias)); // 2^(e - 3b)
+ out = pmul(out, c); // a * 2^e
+ return out;
+}
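With e clamped to [-2099, 2099] and b = floor(e/4), each factor 2^b (and the final 2^(e-3b)) has an exponent of magnitude around 525 at most, so the product reaches a * 2^e without any intermediate overflow or underflow. A scalar sketch of the same factor split (editor's illustration):

#include <cmath>

static inline double pldexp_model(double a, int e) {
  const int b = e >> 2;                     // floor(e/4), also for negative e
  const double c = std::ldexp(1.0, b);      // 2^b, exact
  const double out = ((a * c) * c) * c;     // a * 2^(3b)
  return out * std::ldexp(1.0, e - 3 * b);  // a * 2^e
}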
+
+
+// Extract the biased exponent without exposing a Packet2l packet type to the generic pfrexp code.
+template<>
+EIGEN_STRONG_INLINE
+Packet2d pfrexp_generic_get_biased_exponent(const Packet2d& a) {
+ return pcast<Packet2l, Packet2d>(plogical_shift_right<52>(reinterpret_cast<Packet2l>(pabs(a))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d pfrexp<Packet2d> (const Packet2d& a, Packet2d& exponent) {
+ return pfrexp_generic(a, exponent);
+}
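The helper clears the sign bit and logically shifts the raw representation right by 52, which leaves exactly the 11-bit biased exponent of an IEEE double. Scalar equivalent (editor's illustration):

#include <cstdint>
#include <cstring>

static inline double biased_exponent_model(double a) {
  uint64_t bits;
  std::memcpy(&bits, &a, sizeof bits);  // well-defined type pun
  return static_cast<double>((bits & 0x7fffffffffffffffULL) >> 52);
}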
+
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
Packet2d b, sum;
@@ -964,20 +2668,6 @@ template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
return pfirst<Packet2d>(sum);
}
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
- Packet2d v[2], sum;
- v[0] = vecs[0] + reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(vecs[0]), reinterpret_cast<Packet4f>(vecs[0]), 8));
- v[1] = vecs[1] + reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(vecs[1]), reinterpret_cast<Packet4f>(vecs[1]), 8));
-
-#ifdef _BIG_ENDIAN
- sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(v[0]), reinterpret_cast<Packet4f>(v[1]), 8));
-#else
- sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(v[1]), reinterpret_cast<Packet4f>(v[0]), 8));
-#endif
-
- return sum;
-}
// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
@@ -997,20 +2687,6 @@ template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
return pfirst(pmax(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(a), reinterpret_cast<Packet4ui>(a), 8))));
}
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
- static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
- {
- if (Offset == 1)
-#ifdef _BIG_ENDIAN
- first = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(first), reinterpret_cast<Packet4ui>(second), 8));
-#else
- first = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(second), reinterpret_cast<Packet4ui>(first), 8));
-#endif
- }
-};
-
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
Packet2d t0, t1;
@@ -1022,9 +2698,11 @@ ptranspose(PacketBlock<Packet2d,2>& kernel) {
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
Packet2l select = { ifPacket.select[0], ifPacket.select[1] };
- Packet2bl mask = vec_cmpeq(reinterpret_cast<Packet2d>(select), reinterpret_cast<Packet2d>(p2l_ONE));
+  Packet2bl mask = reinterpret_cast<Packet2bl>(vec_cmpeq(reinterpret_cast<Packet2d>(select), reinterpret_cast<Packet2d>(p2l_ONE)));
return vec_sel(elsePacket, thenPacket, mask);
}
+
+
#endif // __VSX__
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Complex.h
index 57d1201f4..45f6ddb94 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Complex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Complex.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2021 C. Antonio Sanchez <cantonios@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,94 +11,259 @@
#ifndef EIGEN_COMPLEX_CUDA_H
#define EIGEN_COMPLEX_CUDA_H
-// clang-format off
+// Many std::complex methods such as operator+, operator-, operator* and
+// operator/ are not constexpr. Due to this, GCC and older versions of clang do
+// not treat them as device functions and thus Eigen functors making use of
+// these operators fail to compile. Here, we manually specialize these
+// operators and functors for complex types when building for CUDA to enable
+// their use on-device.
+//
+// NOTES:
+// - Compound assignment operators +=,-=,*=,/=(Scalar) will not work on device,
+// since they are already specialized in the standard. Using them will result
+// in silent kernel failures.
+// - Compiling with MSVC and using +=,-=,*=,/=(std::complex<Scalar>) will lead
+// to duplicate definition errors, since these are already specialized in
+// Visual Studio's <complex> header (contrary to the standard). This is
+// preferable to removing such definitions, which will lead to silent kernel
+// failures.
+// - Compiling with ICC requires defining _USE_COMPLEX_SPECIALIZATION_ prior
+// to the first inclusion of <complex>.
+
+#if defined(EIGEN_CUDACC) && defined(EIGEN_GPU_COMPILE_PHASE)
+
+// ICC already specializes std::complex<float> and std::complex<double>
+// operators, preventing us from making them device functions here.
+// This will lead to silent runtime errors if the operators are used on device.
+//
+// To allow std::complex operator use on device, define _OVERRIDE_COMPLEX_SPECIALIZATION_
+// prior to first inclusion of <complex>. This prevents ICC from adding
+// its own specializations, so our custom ones below can be used instead.
+#if !(defined(EIGEN_COMP_ICC) && defined(_USE_COMPLEX_SPECIALIZATION_))
+
+// Import Eigen's internal operator specializations.
+#define EIGEN_USING_STD_COMPLEX_OPERATORS \
+ using Eigen::complex_operator_detail::operator+; \
+ using Eigen::complex_operator_detail::operator-; \
+ using Eigen::complex_operator_detail::operator*; \
+ using Eigen::complex_operator_detail::operator/; \
+ using Eigen::complex_operator_detail::operator+=; \
+ using Eigen::complex_operator_detail::operator-=; \
+ using Eigen::complex_operator_detail::operator*=; \
+ using Eigen::complex_operator_detail::operator/=; \
+ using Eigen::complex_operator_detail::operator==; \
+ using Eigen::complex_operator_detail::operator!=;
namespace Eigen {
-namespace internal {
+// Specialized std::complex overloads.
+namespace complex_operator_detail {
-#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<T> complex_multiply(const std::complex<T>& a, const std::complex<T>& b) {
+ const T a_real = numext::real(a);
+ const T a_imag = numext::imag(a);
+ const T b_real = numext::real(b);
+ const T b_imag = numext::imag(b);
+ return std::complex<T>(
+ a_real * b_real - a_imag * b_imag,
+ a_imag * b_real + a_real * b_imag);
+}
-// Many std::complex methods such as operator+, operator-, operator* and
-// operator/ are not constexpr. Due to this, clang does not treat them as device
-// functions and thus Eigen functors making use of these operators fail to
-// compile. Here, we manually specialize these functors for complex types when
-// building for CUDA to avoid non-constexpr methods.
-
-// Sum
-template<typename T> struct scalar_sum_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {
- typedef typename std::complex<T> result_type;
-
- EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {
- return std::complex<T>(numext::real(a) + numext::real(b),
- numext::imag(a) + numext::imag(b));
- }
-};
-
-template<typename T> struct scalar_sum_op<std::complex<T>, std::complex<T> > : scalar_sum_op<const std::complex<T>, const std::complex<T> > {};
-
-
-// Difference
-template<typename T> struct scalar_difference_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {
- typedef typename std::complex<T> result_type;
-
- EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {
- return std::complex<T>(numext::real(a) - numext::real(b),
- numext::imag(a) - numext::imag(b));
- }
-};
-
-template<typename T> struct scalar_difference_op<std::complex<T>, std::complex<T> > : scalar_difference_op<const std::complex<T>, const std::complex<T> > {};
-
-
-// Product
-template<typename T> struct scalar_product_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {
- enum {
- Vectorizable = packet_traits<std::complex<T> >::HasMul
- };
- typedef typename std::complex<T> result_type;
-
- EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {
- const T a_real = numext::real(a);
- const T a_imag = numext::imag(a);
- const T b_real = numext::real(b);
- const T b_imag = numext::imag(b);
- return std::complex<T>(a_real * b_real - a_imag * b_imag,
- a_real * b_imag + a_imag * b_real);
- }
-};
-
-template<typename T> struct scalar_product_op<std::complex<T>, std::complex<T> > : scalar_product_op<const std::complex<T>, const std::complex<T> > {};
-
-
-// Quotient
-template<typename T> struct scalar_quotient_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {
- enum {
- Vectorizable = packet_traits<std::complex<T> >::HasDiv
- };
- typedef typename std::complex<T> result_type;
-
- EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {
- const T a_real = numext::real(a);
- const T a_imag = numext::imag(a);
- const T b_real = numext::real(b);
- const T b_imag = numext::imag(b);
- const T norm = T(1) / (b_real * b_real + b_imag * b_imag);
- return std::complex<T>((a_real * b_real + a_imag * b_imag) * norm,
- (a_imag * b_real - a_real * b_imag) * norm);
- }
-};
-
-template<typename T> struct scalar_quotient_op<std::complex<T>, std::complex<T> > : scalar_quotient_op<const std::complex<T>, const std::complex<T> > {};
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<T> complex_divide_fast(const std::complex<T>& a, const std::complex<T>& b) {
+ const T a_real = numext::real(a);
+ const T a_imag = numext::imag(a);
+ const T b_real = numext::real(b);
+ const T b_imag = numext::imag(b);
+ const T norm = (b_real * b_real + b_imag * b_imag);
+ return std::complex<T>((a_real * b_real + a_imag * b_imag) / norm,
+ (a_imag * b_real - a_real * b_imag) / norm);
+}
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<T> complex_divide_stable(const std::complex<T>& a, const std::complex<T>& b) {
+ const T a_real = numext::real(a);
+ const T a_imag = numext::imag(a);
+ const T b_real = numext::real(b);
+ const T b_imag = numext::imag(b);
+ // Smith's complex division (https://arxiv.org/pdf/1210.4539.pdf),
+ // guards against over/under-flow.
+ const bool scale_imag = numext::abs(b_imag) <= numext::abs(b_real);
+ const T rscale = scale_imag ? T(1) : b_real / b_imag;
+ const T iscale = scale_imag ? b_imag / b_real : T(1);
+ const T denominator = b_real * rscale + b_imag * iscale;
+ return std::complex<T>((a_real * rscale + a_imag * iscale) / denominator,
+ (a_imag * rscale - a_real * iscale) / denominator);
+}
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+std::complex<T> complex_divide(const std::complex<T>& a, const std::complex<T>& b) {
+#if EIGEN_FAST_MATH
+ return complex_divide_fast(a, b);
+#else
+ return complex_divide_stable(a, b);
#endif
+}
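A small host-side demonstration of why the stable path exists (editor's illustration; the exact operator/ behaviour of a given standard library is an assumption here):

#include <complex>
#include <cstdio>

int main() {
  const std::complex<double> a(1.0, 0.0), b(1e200, 1e200);
  // Textbook formula: the denominator b_r*b_r + b_i*b_i overflows to +inf,
  // so the real part collapses to 1e200 / inf == 0.
  const double naive = (a.real() * b.real() + a.imag() * b.imag())
                     / (b.real() * b.real() + b.imag() * b.imag());
  // Smith's form scales by b_i / b_r (== 1 here) and divides by the finite
  // b_r + b_i * 1 = 2e200 instead; common standard libraries use a
  // comparable scheme in operator/.
  const std::complex<double> stable = a / b;
  std::printf("naive = %g, stable = %g\n", naive, stable.real());  // 0 vs ~5e-201
  return 0;
}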
+
+// NOTE: We cannot specialize compound assignment operators with Scalar T,
+// (i.e. operator@=(const T&), for @=+,-,*,/)
+// since they are already specialized for float/double/long double within
+// the standard <complex> header. We also do not specialize the stream
+// operators.
+#define EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(T) \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator+(const std::complex<T>& a) { return a; } \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator-(const std::complex<T>& a) { \
+ return std::complex<T>(-numext::real(a), -numext::imag(a)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator+(const std::complex<T>& a, const std::complex<T>& b) { \
+ return std::complex<T>(numext::real(a) + numext::real(b), numext::imag(a) + numext::imag(b)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator+(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) + b, numext::imag(a)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator+(const T& a, const std::complex<T>& b) { \
+ return std::complex<T>(a + numext::real(b), numext::imag(b)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator-(const std::complex<T>& a, const std::complex<T>& b) { \
+ return std::complex<T>(numext::real(a) - numext::real(b), numext::imag(a) - numext::imag(b)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator-(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) - b, numext::imag(a)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator-(const T& a, const std::complex<T>& b) { \
+ return std::complex<T>(a - numext::real(b), -numext::imag(b)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator*(const std::complex<T>& a, const std::complex<T>& b) { \
+ return complex_multiply(a, b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator*(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) * b, numext::imag(a) * b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator*(const T& a, const std::complex<T>& b) { \
+ return std::complex<T>(a * numext::real(b), a * numext::imag(b)); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator/(const std::complex<T>& a, const std::complex<T>& b) { \
+ return complex_divide(a, b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator/(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) / b, numext::imag(a) / b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T> operator/(const T& a, const std::complex<T>& b) { \
+ return complex_divide(std::complex<T>(a, 0), b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T>& operator+=(std::complex<T>& a, const std::complex<T>& b) { \
+ numext::real_ref(a) += numext::real(b); \
+ numext::imag_ref(a) += numext::imag(b); \
+ return a; \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T>& operator-=(std::complex<T>& a, const std::complex<T>& b) { \
+ numext::real_ref(a) -= numext::real(b); \
+ numext::imag_ref(a) -= numext::imag(b); \
+ return a; \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T>& operator*=(std::complex<T>& a, const std::complex<T>& b) { \
+ a = complex_multiply(a, b); \
+ return a; \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+std::complex<T>& operator/=(std::complex<T>& a, const std::complex<T>& b) { \
+ a = complex_divide(a, b); \
+ return a; \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+bool operator==(const std::complex<T>& a, const std::complex<T>& b) { \
+ return numext::real(a) == numext::real(b) && numext::imag(a) == numext::imag(b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+bool operator==(const std::complex<T>& a, const T& b) { \
+ return numext::real(a) == b && numext::imag(a) == 0; \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+bool operator==(const T& a, const std::complex<T>& b) { \
+ return a == numext::real(b) && 0 == numext::imag(b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+bool operator!=(const std::complex<T>& a, const std::complex<T>& b) { \
+ return !(a == b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+bool operator!=(const std::complex<T>& a, const T& b) { \
+ return !(a == b); \
+} \
+ \
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+bool operator!=(const T& a, const std::complex<T>& b) { \
+ return !(a == b); \
+}
+
+// Do not specialize for long double, since that reduces to double on device.
+EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(float)
+EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(double)
+
+#undef EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS
+
+
+} // namespace complex_operator_detail
+
+EIGEN_USING_STD_COMPLEX_OPERATORS
+
+namespace numext {
+EIGEN_USING_STD_COMPLEX_OPERATORS
+} // namespace numext
+
+namespace internal {
+EIGEN_USING_STD_COMPLEX_OPERATORS
+
+} // namespace internal
+} // namespace Eigen
-} // end namespace internal
+#endif // !(EIGEN_COMP_ICC && _USE_COMPLEX_SPECIALIZATION_)
-} // end namespace Eigen
+#endif // EIGEN_CUDACC && EIGEN_GPU_COMPILE_PHASE
-#endif // EIGEN_COMPLEX_CUDA_H
+#endif // EIGEN_COMPLEX_CUDA_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Half.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Half.h
deleted file mode 100644
index ee24e615a..000000000
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/Half.h
+++ /dev/null
@@ -1,666 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-//
-// The conversion routines are Copyright (c) Fabian Giesen, 2016.
-// The original license follows:
-//
-// Copyright (c) Fabian Giesen, 2016
-// All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted.
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Standard 16-bit float type, mostly useful for GPUs. Defines a new
-// type Eigen::half (inheriting from CUDA's __half struct) with
-// operator overloads such that it behaves basically as an arithmetic
-// type. It will be quite slow on CPUs (so it is recommended to stay
-// in fp32 for CPUs, except for simple parameter conversions, I/O
-// to disk and the like), but fast on GPUs.
-
-
-#ifndef EIGEN_HALF_CUDA_H
-#define EIGEN_HALF_CUDA_H
-
-#if __cplusplus > 199711L
-#define EIGEN_EXPLICIT_CAST(tgt_type) explicit operator tgt_type()
-#else
-#define EIGEN_EXPLICIT_CAST(tgt_type) operator tgt_type()
-#endif
-
-
-namespace Eigen {
-
-struct half;
-
-namespace half_impl {
-
-#if !defined(EIGEN_HAS_CUDA_FP16)
-// Make our own __half_raw definition that is similar to CUDA's.
-struct __half_raw {
- EIGEN_DEVICE_FUNC __half_raw() : x(0) {}
- explicit EIGEN_DEVICE_FUNC __half_raw(unsigned short raw) : x(raw) {}
- unsigned short x;
-};
-#elif defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000
-// In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw
-typedef __half __half_raw;
-#endif
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw raw_uint16_to_half(unsigned short x);
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff);
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);
-
-struct half_base : public __half_raw {
- EIGEN_DEVICE_FUNC half_base() {}
- EIGEN_DEVICE_FUNC half_base(const half_base& h) : __half_raw(h) {}
- EIGEN_DEVICE_FUNC half_base(const __half_raw& h) : __half_raw(h) {}
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
- EIGEN_DEVICE_FUNC half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {}
-#endif
-};
-
-} // namespace half_impl
-
-// Class definition.
-struct half : public half_impl::half_base {
- #if !defined(EIGEN_HAS_CUDA_FP16) || (defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER < 90000)
- typedef half_impl::__half_raw __half_raw;
- #endif
-
- EIGEN_DEVICE_FUNC half() {}
-
- EIGEN_DEVICE_FUNC half(const __half_raw& h) : half_impl::half_base(h) {}
- EIGEN_DEVICE_FUNC half(const half& h) : half_impl::half_base(h) {}
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC_VER) && EIGEN_CUDACC_VER >= 90000
- EIGEN_DEVICE_FUNC half(const __half& h) : half_impl::half_base(h) {}
-#endif
-
- explicit EIGEN_DEVICE_FUNC half(bool b)
- : half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
- template<class T>
- explicit EIGEN_DEVICE_FUNC half(const T& val)
- : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(val))) {}
- explicit EIGEN_DEVICE_FUNC half(float f)
- : half_impl::half_base(half_impl::float_to_half_rtne(f)) {}
-
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(bool) const {
- // +0.0 and -0.0 become false, everything else becomes true.
- return (x & 0x7fff) != 0;
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(signed char) const {
- return static_cast<signed char>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned char) const {
- return static_cast<unsigned char>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(short) const {
- return static_cast<short>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned short) const {
- return static_cast<unsigned short>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(int) const {
- return static_cast<int>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned int) const {
- return static_cast<unsigned int>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(long) const {
- return static_cast<long>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned long) const {
- return static_cast<unsigned long>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(long long) const {
- return static_cast<long long>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned long long) const {
- return static_cast<unsigned long long>(half_impl::half_to_float(*this));
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(float) const {
- return half_impl::half_to_float(*this);
- }
- EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(double) const {
- return static_cast<double>(half_impl::half_to_float(*this));
- }
-
- EIGEN_DEVICE_FUNC half& operator=(const half& other) {
- x = other.x;
- return *this;
- }
-};
-
-namespace half_impl {
-
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
-
-// Intrinsics for native fp16 support. Note that on current hardware,
-// these are no faster than fp32 arithmetic (you need to use the half2
-// versions to benefit from the increased ALU throughput), but you do save
-// the conversion steps back and forth.
-
-__device__ half operator + (const half& a, const half& b) {
- return __hadd(a, b);
-}
-__device__ half operator * (const half& a, const half& b) {
- return __hmul(a, b);
-}
-__device__ half operator - (const half& a, const half& b) {
- return __hsub(a, b);
-}
-__device__ half operator / (const half& a, const half& b) {
- float num = __half2float(a);
- float denom = __half2float(b);
- return __float2half(num / denom);
-}
-__device__ half operator - (const half& a) {
- return __hneg(a);
-}
-__device__ half& operator += (half& a, const half& b) {
- a = a + b;
- return a;
-}
-__device__ half& operator *= (half& a, const half& b) {
- a = a * b;
- return a;
-}
-__device__ half& operator -= (half& a, const half& b) {
- a = a - b;
- return a;
-}
-__device__ half& operator /= (half& a, const half& b) {
- a = a / b;
- return a;
-}
-__device__ bool operator == (const half& a, const half& b) {
- return __heq(a, b);
-}
-__device__ bool operator != (const half& a, const half& b) {
- return __hne(a, b);
-}
-__device__ bool operator < (const half& a, const half& b) {
- return __hlt(a, b);
-}
-__device__ bool operator <= (const half& a, const half& b) {
- return __hle(a, b);
-}
-__device__ bool operator > (const half& a, const half& b) {
- return __hgt(a, b);
-}
-__device__ bool operator >= (const half& a, const half& b) {
- return __hge(a, b);
-}
-
-#else // Emulate support for half floats
-
-// Definitions for CPUs and older CUDA, mostly working through conversion
-// to/from fp32.
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
- return half(float(a) + float(b));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
- return half(float(a) * float(b));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
- return half(float(a) - float(b));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
- return half(float(a) / float(b));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
- half result;
- result.x = a.x ^ 0x8000;
- return result;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
- a = half(float(a) + float(b));
- return a;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
- a = half(float(a) * float(b));
- return a;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
- a = half(float(a) - float(b));
- return a;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
- a = half(float(a) / float(b));
- return a;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
- return float(a) == float(b);
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
- return float(a) != float(b);
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
- return float(a) < float(b);
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
- return float(a) <= float(b);
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
- return float(a) > float(b);
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
- return float(a) >= float(b);
-}
-
-#endif // Emulate support for half floats
-
-// Division by an index. Do it in full float precision to avoid accuracy
-// issues in converting the denominator to half.
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) {
- return half(static_cast<float>(a) / static_cast<float>(b));
-}
-
-// Conversion routines, including fallbacks for the host or older CUDA.
-// Note that newer Intel CPUs (Haswell and later) have hardware-vectorized
-// versions of these conversions. If we need more performance on older or
-// other CPUs, it is also possible to vectorize them directly.
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw raw_uint16_to_half(unsigned short x) {
- __half_raw h;
- h.x = x;
- return h;
-}
-
-union FP32 {
- unsigned int u;
- float f;
-};
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
- __half tmp_ff = __float2half(ff);
- return *(__half_raw*)&tmp_ff;
-
-#elif defined(EIGEN_HAS_FP16_C)
- __half_raw h;
- h.x = _cvtss_sh(ff, 0);
- return h;
-
-#else
- FP32 f; f.f = ff;
-
- const FP32 f32infty = { 255 << 23 };
- const FP32 f16max = { (127 + 16) << 23 };
- const FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
- unsigned int sign_mask = 0x80000000u;
- __half_raw o;
- o.x = static_cast<unsigned short>(0x0u);
-
- unsigned int sign = f.u & sign_mask;
- f.u ^= sign;
-
- // NOTE all the integer compares in this function can be safely
- // compiled into signed compares since all operands are below
- // 0x80000000. Important if you want fast straight SSE2 code
- // (since there's no unsigned PCMPGTD).
-
- if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set)
- o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
- } else { // (De)normalized number or zero
- if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero
- // use a magic value to align our 10 mantissa bits at the bottom of
- // the float. as long as FP addition is round-to-nearest-even this
- // just works.
- f.f += denorm_magic.f;
-
- // and one integer subtract of the bias later, we have our final half!
- o.x = static_cast<unsigned short>(f.u - denorm_magic.u);
- } else {
- unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
-
- // update exponent, rounding bias part 1
- f.u += ((unsigned int)(15 - 127) << 23) + 0xfff;
- // rounding bias part 2
- f.u += mant_odd;
- // take the bits!
- o.x = static_cast<unsigned short>(f.u >> 13);
- }
- }
-
- o.x |= static_cast<unsigned short>(sign >> 16);
- return o;
-#endif
-}
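The rounding bias rewards a worked tie case (editor's illustration, not part of the deleted file): bit 13 of the float mantissa is the last bit that survives the final >> 13, so adding 0xfff (one below half of 1 << 13) plus the "mantissa odd" bit rounds to nearest with ties going to the even half value.

#include <cstdint>

// ff = 1.00048828125f (bits 0x3F801000) lies exactly halfway between the
// halves 0x3C00 (1.0) and 0x3C01 (1.0009765625).
static inline uint16_t tie_example() {
  uint32_t u = 0x3F801000u;                              // bits of 1 + 2^-11
  const uint32_t mant_odd = (u >> 13) & 1;               // 0: lower neighbour is even
  u += (static_cast<uint32_t>(15 - 127) << 23) + 0xfff;  // exponent rebias + rounding bias
  u += mant_odd;                                         // tie-break toward even
  return static_cast<uint16_t>(u >> 13);                 // 0x3C00: the even neighbour
}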
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
- return __half2float(h);
-
-#elif defined(EIGEN_HAS_FP16_C)
- return _cvtsh_ss(h.x);
-
-#else
- const FP32 magic = { 113 << 23 };
- const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
- FP32 o;
-
- o.u = (h.x & 0x7fff) << 13; // exponent/mantissa bits
- unsigned int exp = shifted_exp & o.u; // just the exponent
- o.u += (127 - 15) << 23; // exponent adjust
-
- // handle exponent special cases
- if (exp == shifted_exp) { // Inf/NaN?
- o.u += (128 - 16) << 23; // extra exp adjust
- } else if (exp == 0) { // Zero/Denormal?
- o.u += 1 << 23; // extra exp adjust
- o.f -= magic.f; // renormalize
- }
-
- o.u |= (h.x & 0x8000) << 16; // sign bit
- return o.f;
-#endif
-}
-
-// --- standard functions ---
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) {
- return (a.x & 0x7fff) == 0x7c00;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
- return __hisnan(a);
-#else
- return (a.x & 0x7fff) > 0x7c00;
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) {
- return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
-}
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) {
- half result;
- result.x = a.x & 0x7FFF;
- return result;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) {
-#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530
- return half(hexp(a));
-#else
- return half(::expf(float(a)));
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) {
- return half(numext::expm1(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) {
-#if defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDACC_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
- return half(::hlog(a));
-#else
- return half(::logf(float(a)));
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) {
- return half(numext::log1p(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) {
- return half(::log10f(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) {
-#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530
- return half(hsqrt(a));
-#else
- return half(::sqrtf(float(a)));
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) {
- return half(::powf(float(a), float(b)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) {
- return half(::sinf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) {
- return half(::cosf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tan(const half& a) {
- return half(::tanf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) {
- return half(::tanhf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) {
-#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300
- return half(hfloor(a));
-#else
- return half(::floorf(float(a)));
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {
-#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300
- return half(hceil(a));
-#else
- return half(::ceilf(float(a)));
-#endif
-}
-
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
- return __hlt(b, a) ? b : a;
-#else
- const float f1 = static_cast<float>(a);
- const float f2 = static_cast<float>(b);
- return f2 < f1 ? b : a;
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) {
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
- return __hlt(a, b) ? b : a;
-#else
- const float f1 = static_cast<float>(a);
- const float f2 = static_cast<float>(b);
- return f1 < f2 ? b : a;
-#endif
-}
-
-EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) {
- os << static_cast<float>(v);
- return os;
-}
-
-} // end namespace half_impl
-
-// import Eigen::half_impl::half into Eigen namespace
-// using half_impl::half;
-
-namespace internal {
-
-template<>
-struct random_default_impl<half, false, false>
-{
- static inline half run(const half& x, const half& y)
- {
- return x + (y-x) * half(float(std::rand()) / float(RAND_MAX));
- }
- static inline half run()
- {
- return run(half(-1.f), half(1.f));
- }
-};
-
-template<> struct is_arithmetic<half> { enum { value = true }; };
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-namespace std {
-template<>
-struct numeric_limits<Eigen::half> {
- static const bool is_specialized = true;
- static const bool is_signed = true;
- static const bool is_integer = false;
- static const bool is_exact = false;
- static const bool has_infinity = true;
- static const bool has_quiet_NaN = true;
- static const bool has_signaling_NaN = true;
- static const float_denorm_style has_denorm = denorm_present;
- static const bool has_denorm_loss = false;
- static const std::float_round_style round_style = std::round_to_nearest;
- static const bool is_iec559 = false;
- static const bool is_bounded = false;
- static const bool is_modulo = false;
- static const int digits = 11;
- static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
- static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
- static const int radix = 2;
- static const int min_exponent = -13;
- static const int min_exponent10 = -4;
- static const int max_exponent = 16;
- static const int max_exponent10 = 4;
- static const bool traps = true;
- static const bool tinyness_before = false;
-
- static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); }
- static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); }
- static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); }
- static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x0800); }
- static Eigen::half round_error() { return Eigen::half(0.5); }
- static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); }
- static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
- static Eigen::half signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
- static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); }
-};
-}
-
-namespace Eigen {
-
-template<> struct NumTraits<Eigen::half>
- : GenericNumTraits<Eigen::half>
-{
- enum {
- IsSigned = true,
- IsInteger = false,
- IsComplex = false,
- RequireInitialization = false
- };
-
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half epsilon() {
- return half_impl::raw_uint16_to_half(0x0800);
- }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half dummy_precision() { return Eigen::half(1e-2f); }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half highest() {
- return half_impl::raw_uint16_to_half(0x7bff);
- }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half lowest() {
- return half_impl::raw_uint16_to_half(0xfbff);
- }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half infinity() {
- return half_impl::raw_uint16_to_half(0x7c00);
- }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() {
- return half_impl::raw_uint16_to_half(0x7c01);
- }
-};
-
-} // end namespace Eigen
-
-// C-like standard mathematical functions and transcendentals.
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half fabsh(const Eigen::half& a) {
- Eigen::half result;
- result.x = a.x & 0x7FFF;
- return result;
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half exph(const Eigen::half& a) {
- return Eigen::half(::expf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half logh(const Eigen::half& a) {
-#if EIGEN_CUDACC_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530
- return Eigen::half(::hlog(a));
-#else
- return Eigen::half(::logf(float(a)));
-#endif
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half sqrth(const Eigen::half& a) {
- return Eigen::half(::sqrtf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half powh(const Eigen::half& a, const Eigen::half& b) {
- return Eigen::half(::powf(float(a), float(b)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half floorh(const Eigen::half& a) {
- return Eigen::half(::floorf(float(a)));
-}
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half ceilh(const Eigen::half& a) {
- return Eigen::half(::ceilf(float(a)));
-}
-
-namespace std {
-
-#if __cplusplus > 199711L
-template <>
-struct hash<Eigen::half> {
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const {
- return static_cast<std::size_t>(a.x);
- }
-};
-#endif
-
-} // end namespace std
-
-
-// Add the missing shfl_xor intrinsic
-#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
-__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) {
- #if EIGEN_CUDACC_VER < 90000
- return static_cast<Eigen::half>(__shfl_xor(static_cast<float>(var), laneMask, width));
- #else
- return static_cast<Eigen::half>(__shfl_xor_sync(0xFFFFFFFF, static_cast<float>(var), laneMask, width));
- #endif
-}
-#endif
-
-// ldg() has an overload for __half_raw, but we also need one for Eigen::half.
-#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
-EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half __ldg(const Eigen::half* ptr) {
- return Eigen::half_impl::raw_uint16_to_half(
- __ldg(reinterpret_cast<const unsigned short*>(ptr)));
-}
-#endif
-
-
-#if defined(EIGEN_CUDA_ARCH)
-namespace Eigen {
-namespace numext {
-
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-bool (isnan)(const Eigen::half& h) {
- return (half_impl::isnan)(h);
-}
-
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-bool (isinf)(const Eigen::half& h) {
- return (half_impl::isinf)(h);
-}
-
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-bool (isfinite)(const Eigen::half& h) {
- return (half_impl::isfinite)(h);
-}
-
-} // namespace numext
-} // namespace Eigen
-#endif
-
-#endif // EIGEN_HALF_CUDA_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMath.h
deleted file mode 100644
index 97a8abe59..000000000
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMath.h
+++ /dev/null
@@ -1,333 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PACKET_MATH_CUDA_H
-#define EIGEN_PACKET_MATH_CUDA_H
-
-namespace Eigen {
-
-namespace internal {
-
-// Make sure this is only available when targeting a GPU: we don't want to
-// introduce conflicts between these packet_traits definitions and the ones
-// we'll use on the host side (SSE, AVX, ...)
-#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
-template<> struct is_arithmetic<float4> { enum { value = true }; };
-template<> struct is_arithmetic<double2> { enum { value = true }; };
-
-template<> struct packet_traits<float> : default_packet_traits
-{
- typedef float4 type;
- typedef float4 half;
- enum {
- Vectorizable = 1,
- AlignedOnScalar = 1,
- size=4,
- HasHalfPacket = 0,
-
- HasDiv = 1,
- HasSin = 0,
- HasCos = 0,
- HasLog = 1,
- HasExp = 1,
- HasSqrt = 1,
- HasRsqrt = 1,
- HasLGamma = 1,
- HasDiGamma = 1,
- HasZeta = 1,
- HasPolygamma = 1,
- HasErf = 1,
- HasErfc = 1,
- HasIGamma = 1,
- HasIGammac = 1,
- HasBetaInc = 1,
-
- HasBlend = 0,
- };
-};
-
-template<> struct packet_traits<double> : default_packet_traits
-{
- typedef double2 type;
- typedef double2 half;
- enum {
- Vectorizable = 1,
- AlignedOnScalar = 1,
- size=2,
- HasHalfPacket = 0,
-
- HasDiv = 1,
- HasLog = 1,
- HasExp = 1,
- HasSqrt = 1,
- HasRsqrt = 1,
- HasLGamma = 1,
- HasDiGamma = 1,
- HasZeta = 1,
- HasPolygamma = 1,
- HasErf = 1,
- HasErfc = 1,
- HasIGamma = 1,
- HasIGammac = 1,
- HasBetaInc = 1,
-
- HasBlend = 0,
- };
-};
-
-
-template<> struct unpacket_traits<float4> { typedef float type; enum {size=4, alignment=Aligned16}; typedef float4 half; };
-template<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16}; typedef double2 half; };
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1<float4>(const float& from) {
- return make_float4(from, from, from, from);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1<double2>(const double& from) {
- return make_double2(from, from);
-}
-
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset<float4>(const float& a) {
- return make_float4(a, a+1, a+2, a+3);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset<double2>(const double& a) {
- return make_double2(a, a+1);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd<float4>(const float4& a, const float4& b) {
- return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd<double2>(const double2& a, const double2& b) {
- return make_double2(a.x+b.x, a.y+b.y);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub<float4>(const float4& a, const float4& b) {
- return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub<double2>(const double2& a, const double2& b) {
- return make_double2(a.x-b.x, a.y-b.y);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) {
- return make_float4(-a.x, -a.y, -a.z, -a.w);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) {
- return make_double2(-a.x, -a.y);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; }
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; }
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmul<float4>(const float4& a, const float4& b) {
- return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul<double2>(const double2& a, const double2& b) {
- return make_double2(a.x*b.x, a.y*b.y);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv<float4>(const float4& a, const float4& b) {
- return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv<double2>(const double2& a, const double2& b) {
- return make_double2(a.x/b.x, a.y/b.y);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmin<float4>(const float4& a, const float4& b) {
- return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w));
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin<double2>(const double2& a, const double2& b) {
- return make_double2(fmin(a.x, b.x), fmin(a.y, b.y));
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax<float4>(const float4& a, const float4& b) {
- return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w));
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax<double2>(const double2& a, const double2& b) {
- return make_double2(fmax(a.x, b.x), fmax(a.y, b.y));
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload<float4>(const float* from) {
- return *reinterpret_cast<const float4*>(from);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload<double2>(const double* from) {
- return *reinterpret_cast<const double2*>(from);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu<float4>(const float* from) {
- return make_float4(from[0], from[1], from[2], from[3]);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const double* from) {
- return make_double2(from[0], from[1]);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
- return make_float4(from[0], from[0], from[1], from[1]);
-}
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
- return make_double2(from[0], from[0]);
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<float>(float* to, const float4& from) {
- *reinterpret_cast<float4*>(to) = from;
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<double>(double* to, const double2& from) {
- *reinterpret_cast<double2*>(to) = from;
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const float4& from) {
- to[0] = from.x;
- to[1] = from.y;
- to[2] = from.z;
- to[3] = from.w;
-}
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const double2& from) {
- to[0] = from.x;
- to[1] = from.y;
-}
-
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const float* from) {
-#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
- return __ldg((const float4*)from);
-#else
- return make_float4(from[0], from[1], from[2], from[3]);
-#endif
-}
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const double* from) {
-#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
- return __ldg((const double2*)from);
-#else
- return make_double2(from[0], from[1]);
-#endif
-}
-
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const float* from) {
-#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
- return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3));
-#else
- return make_float4(from[0], from[1], from[2], from[3]);
-#endif
-}
-template<>
-EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Unaligned>(const double* from) {
-#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
- return make_double2(__ldg(from+0), __ldg(from+1));
-#else
- return make_double2(from[0], from[1]);
-#endif
-}
-
-template<> EIGEN_DEVICE_FUNC inline float4 pgather<float, float4>(const float* from, Index stride) {
- return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]);
-}
-
-template<> EIGEN_DEVICE_FUNC inline double2 pgather<double, double2>(const double* from, Index stride) {
- return make_double2(from[0*stride], from[1*stride]);
-}
-
-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, float4>(float* to, const float4& from, Index stride) {
- to[stride*0] = from.x;
- to[stride*1] = from.y;
- to[stride*2] = from.z;
- to[stride*3] = from.w;
-}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, double2>(double* to, const double2& from, Index stride) {
- to[stride*0] = from.x;
- to[stride*1] = from.y;
-}
-
-template<> EIGEN_DEVICE_FUNC inline float pfirst<float4>(const float4& a) {
- return a.x;
-}
-template<> EIGEN_DEVICE_FUNC inline double pfirst<double2>(const double2& a) {
- return a.x;
-}
-
-template<> EIGEN_DEVICE_FUNC inline float predux<float4>(const float4& a) {
- return a.x + a.y + a.z + a.w;
-}
-template<> EIGEN_DEVICE_FUNC inline double predux<double2>(const double2& a) {
- return a.x + a.y;
-}
-
-template<> EIGEN_DEVICE_FUNC inline float predux_max<float4>(const float4& a) {
- return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w));
-}
-template<> EIGEN_DEVICE_FUNC inline double predux_max<double2>(const double2& a) {
- return fmax(a.x, a.y);
-}
-
-template<> EIGEN_DEVICE_FUNC inline float predux_min<float4>(const float4& a) {
- return fminf(fminf(a.x, a.y), fminf(a.z, a.w));
-}
-template<> EIGEN_DEVICE_FUNC inline double predux_min<double2>(const double2& a) {
- return fmin(a.x, a.y);
-}
-
-template<> EIGEN_DEVICE_FUNC inline float predux_mul<float4>(const float4& a) {
- return a.x * a.y * a.z * a.w;
-}
-template<> EIGEN_DEVICE_FUNC inline double predux_mul<double2>(const double2& a) {
- return a.x * a.y;
-}
-
-template<> EIGEN_DEVICE_FUNC inline float4 pabs<float4>(const float4& a) {
- return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w));
-}
-template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {
- return make_double2(fabs(a.x), fabs(a.y));
-}
-
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<float4,4>& kernel) {
- float tmp = kernel.packet[0].y;
- kernel.packet[0].y = kernel.packet[1].x;
- kernel.packet[1].x = tmp;
-
- tmp = kernel.packet[0].z;
- kernel.packet[0].z = kernel.packet[2].x;
- kernel.packet[2].x = tmp;
-
- tmp = kernel.packet[0].w;
- kernel.packet[0].w = kernel.packet[3].x;
- kernel.packet[3].x = tmp;
-
- tmp = kernel.packet[1].z;
- kernel.packet[1].z = kernel.packet[2].y;
- kernel.packet[2].y = tmp;
-
- tmp = kernel.packet[1].w;
- kernel.packet[1].w = kernel.packet[3].y;
- kernel.packet[3].y = tmp;
-
- tmp = kernel.packet[2].w;
- kernel.packet[2].w = kernel.packet[3].z;
- kernel.packet[3].z = tmp;
-}
-
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<double2,2>& kernel) {
- double tmp = kernel.packet[0].y;
- kernel.packet[0].y = kernel.packet[1].x;
- kernel.packet[1].x = tmp;
-}
-
-#endif
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-
-#endif // EIGEN_PACKET_MATH_CUDA_H
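
The file removed above specialized Eigen's packet primitives (loads, gathers, scatters, reductions, transpose) for CUDA's built-in float4/double2 vector types. As a hedged illustration of how such primitives compose in a kernel, here is a minimal standalone sketch in the same style; packet_sum and row_sums are hypothetical names, an sm_35+ GPU and a 16-byte-aligned input are assumed:

#include <cuda_runtime.h>

__device__ __forceinline__ float packet_sum(float4 a) {
  return a.x + a.y + a.z + a.w;            // same reduction as predux<float4> above
}

__global__ void row_sums(const float* in, float* out, int n4) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n4) {
    // Read-only cached load; __ldg needs sm_35+, mirroring ploadt_ro above.
    float4 v = __ldg(reinterpret_cast<const float4*>(in) + i);
    out[i] = packet_sum(v);
  }
}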
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMathHalf.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMathHalf.h
deleted file mode 100644
index ce48e4b31..000000000
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/PacketMathHalf.h
+++ /dev/null
@@ -1,1133 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PACKET_MATH_HALF_CUDA_H
-#define EIGEN_PACKET_MATH_HALF_CUDA_H
-
-
-namespace Eigen {
-namespace internal {
-
-// Most of the following operations require arch >= 3.0
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
-
-template<> struct is_arithmetic<half2> { enum { value = true }; };
-
-template<> struct packet_traits<Eigen::half> : default_packet_traits
-{
- typedef half2 type;
- typedef half2 half;
- enum {
- Vectorizable = 1,
- AlignedOnScalar = 1,
- size=2,
- HasHalfPacket = 0,
- HasAdd = 1,
- HasMul = 1,
- HasDiv = 1,
- HasSqrt = 1,
- HasRsqrt = 1,
- HasExp = 1,
- HasExpm1 = 1,
- HasLog = 1,
- HasLog1p = 1
- };
-};
-
-template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16}; typedef half2 half; };
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
- return __half2half2(from);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pload<half2>(const Eigen::half* from) {
- return *reinterpret_cast<const half2*>(from);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 ploadu<half2>(const Eigen::half* from) {
- return __halves2half2(from[0], from[1]);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 ploaddup<half2>(const Eigen::half* from) {
- return __halves2half2(from[0], from[0]);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const half2& from) {
- *reinterpret_cast<half2*>(to) = from;
-}
-
-template<> __device__ EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const half2& from) {
- to[0] = __low2half(from);
- to[1] = __high2half(from);
-}
-
-template<>
- __device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Aligned>(const Eigen::half* from) {
-#if EIGEN_CUDA_ARCH >= 350
- return __ldg((const half2*)from);
-#else
- return __halves2half2(*(from+0), *(from+1));
-#endif
-}
-
-template<>
-__device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Unaligned>(const Eigen::half* from) {
-#if EIGEN_CUDA_ARCH >= 350
- return __halves2half2(__ldg(from+0), __ldg(from+1));
-#else
- return __halves2half2(*(from+0), *(from+1));
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pgather<Eigen::half, half2>(const Eigen::half* from, Index stride) {
- return __halves2half2(from[0*stride], from[1*stride]);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE void pscatter<Eigen::half, half2>(Eigen::half* to, const half2& from, Index stride) {
- to[stride*0] = __low2half(from);
- to[stride*1] = __high2half(from);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half pfirst<half2>(const half2& a) {
- return __low2half(a);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {
- half2 result;
- unsigned temp = *(reinterpret_cast<const unsigned*>(&(a)));
- *(reinterpret_cast<unsigned*>(&(result))) = temp & 0x7FFF7FFF;
- return result;
-}
-
-
-__device__ EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<half2,2>& kernel) {
- __half a1 = __low2half(kernel.packet[0]);
- __half a2 = __high2half(kernel.packet[0]);
- __half b1 = __low2half(kernel.packet[1]);
- __half b2 = __high2half(kernel.packet[1]);
- kernel.packet[0] = __halves2half2(a1, b1);
- kernel.packet[1] = __halves2half2(a2, b2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen::half& a) {
-#if EIGEN_CUDA_ARCH >= 530
- return __halves2half2(a, __hadd(a, __float2half(1.0f)));
-#else
- float f = __half2float(a) + 1.0f;
- return __halves2half2(a, __float2half(f));
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, const half2& b) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hadd2(a, b);
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- float r1 = a1 + b1;
- float r2 = a2 + b2;
- return __floats2half2_rn(r1, r2);
-#endif
-}
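
The pre-sm_53 fallback above (and in psub, pnegate, pmul, and pmadd below) always has the same shape: widen each half lane to float, apply the scalar operation, and repack with round-to-nearest-even. A generic sketch of that pattern, assuming only the cuda_fp16.h intrinsics; emulate_binary and AddOp are hypothetical helpers, not part of the diff:

#include <cuda_fp16.h>

struct AddOp { __device__ float operator()(float x, float y) const { return x + y; } };

// Widen each lane to float, apply the scalar op, narrow with round-to-nearest-even.
template <typename Op>
__device__ __forceinline__ half2 emulate_binary(half2 a, half2 b, Op op) {
  float a1 = __low2float(a), a2 = __high2float(a);
  float b1 = __low2float(b), b2 = __high2float(b);
  return __floats2half2_rn(op(a1, b1), op(a2, b2));
}
// emulate_binary(a, b, AddOp{}) reproduces the fallback branch of padd above.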
-
-template<> __device__ EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, const half2& b) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hsub2(a, b);
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- float r1 = a1 - b1;
- float r2 = a2 - b2;
- return __floats2half2_rn(r1, r2);
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hneg2(a);
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- return __floats2half2_rn(-a1, -a2);
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, const half2& b) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hmul2(a, b);
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- float r1 = a1 * b1;
- float r2 = a2 * b2;
- return __floats2half2_rn(r1, r2);
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, const half2& b, const half2& c) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hfma2(a, b, c);
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- float c1 = __low2float(c);
- float c2 = __high2float(c);
- float r1 = a1 * b1 + c1;
- float r2 = a2 * b2 + c2;
- return __floats2half2_rn(r1, r2);
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, const half2& b) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- float r1 = a1 / b1;
- float r2 = a2 / b2;
- return __floats2half2_rn(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, const half2& b) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
- __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
- return __halves2half2(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, const half2& b) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float b1 = __low2float(b);
- float b2 = __high2float(b);
- __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
- __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
- return __halves2half2(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux<half2>(const half2& a) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hadd(__low2half(a), __high2half(a));
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 + a2)));
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const half2& a) {
-#if EIGEN_CUDA_ARCH >= 530
- __half first = __low2half(a);
- __half second = __high2half(a);
- return __hgt(first, second) ? first : second;
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- return a1 > a2 ? __low2half(a) : __high2half(a);
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const half2& a) {
-#if EIGEN_CUDA_ARCH >= 530
- __half first = __low2half(a);
- __half second = __high2half(a);
- return __hlt(first, second) ? first : second;
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- return a1 < a2 ? __low2half(a) : __high2half(a);
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_mul<half2>(const half2& a) {
-#if EIGEN_CUDA_ARCH >= 530
- return __hmul(__low2half(a), __high2half(a));
-#else
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 * a2)));
-#endif
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float r1 = log1pf(a1);
- float r2 = log1pf(a2);
- return __floats2half2_rn(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pexpm1<half2>(const half2& a) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float r1 = expm1f(a1);
- float r2 = expm1f(a2);
- return __floats2half2_rn(r1, r2);
-}
-
-#if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530
-
-template<> __device__ EIGEN_STRONG_INLINE
-half2 plog<half2>(const half2& a) {
- return h2log(a);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE
-half2 pexp<half2>(const half2& a) {
- return h2exp(a);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE
-half2 psqrt<half2>(const half2& a) {
- return h2sqrt(a);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE
-half2 prsqrt<half2>(const half2& a) {
- return h2rsqrt(a);
-}
-
-#else
-
-template<> __device__ EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float r1 = logf(a1);
- float r2 = logf(a2);
- return __floats2half2_rn(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float r1 = expf(a1);
- float r2 = expf(a2);
- return __floats2half2_rn(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float r1 = sqrtf(a1);
- float r2 = sqrtf(a2);
- return __floats2half2_rn(r1, r2);
-}
-
-template<> __device__ EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {
- float a1 = __low2float(a);
- float a2 = __high2float(a);
- float r1 = rsqrtf(a1);
- float r2 = rsqrtf(a2);
- return __floats2half2_rn(r1, r2);
-}
-
-#endif
-
-#elif defined EIGEN_VECTORIZE_AVX512
-
-typedef struct {
- __m256i x;
-} Packet16h;
-
-
-template<> struct is_arithmetic<Packet16h> { enum { value = true }; };
-
-template <>
-struct packet_traits<half> : default_packet_traits {
- typedef Packet16h type;
- // There is no half-size packet for Packet16h.
- typedef Packet16h half;
- enum {
- Vectorizable = 1,
- AlignedOnScalar = 1,
- size = 16,
- HasHalfPacket = 0,
- HasAdd = 0,
- HasSub = 0,
- HasMul = 0,
- HasNegate = 0,
- HasAbs = 0,
- HasAbs2 = 0,
- HasMin = 0,
- HasMax = 0,
- HasConj = 0,
- HasSetLinear = 0,
- HasDiv = 0,
- HasSqrt = 0,
- HasRsqrt = 0,
- HasExp = 0,
- HasLog = 0,
- HasBlend = 0
- };
-};
-
-
-template<> struct unpacket_traits<Packet16h> { typedef Eigen::half type; enum {size=16, alignment=Aligned32}; typedef Packet16h half; };
-
-template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
- Packet16h result;
- result.x = _mm256_set1_epi16(from.x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
- return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from.x, 0)));
-}
-
-template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
- Packet16h result;
- result.x = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
- Packet16h result;
- result.x = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
- _mm256_store_si256((__m256i*)to, from.x);
-}
-
-template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
- _mm256_storeu_si256((__m256i*)to, from.x);
-}
-
-template<> EIGEN_STRONG_INLINE Packet16h
-ploadquad(const Eigen::half* from) {
- Packet16h result;
- unsigned short a = from[0].x;
- unsigned short b = from[1].x;
- unsigned short c = from[2].x;
- unsigned short d = from[3].x;
- result.x = _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
- return result;
-}
-
-EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
-#ifdef EIGEN_HAS_FP16_C
- return _mm512_cvtph_ps(a.x);
-#else
- EIGEN_ALIGN64 half aux[16];
- pstore(aux, a);
- float f0(aux[0]);
- float f1(aux[1]);
- float f2(aux[2]);
- float f3(aux[3]);
- float f4(aux[4]);
- float f5(aux[5]);
- float f6(aux[6]);
- float f7(aux[7]);
- float f8(aux[8]);
- float f9(aux[9]);
- float fa(aux[10]);
- float fb(aux[11]);
- float fc(aux[12]);
- float fd(aux[13]);
- float fe(aux[14]);
- float ff(aux[15]);
-
- return _mm512_set_ps(
- ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);
-#endif
-}
-
-EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
-#ifdef EIGEN_HAS_FP16_C
- Packet16h result;
- result.x = _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
- return result;
-#else
- EIGEN_ALIGN64 float aux[16];
- pstore(aux, a);
- half h0(aux[0]);
- half h1(aux[1]);
- half h2(aux[2]);
- half h3(aux[3]);
- half h4(aux[4]);
- half h5(aux[5]);
- half h6(aux[6]);
- half h7(aux[7]);
- half h8(aux[8]);
- half h9(aux[9]);
- half ha(aux[10]);
- half hb(aux[11]);
- half hc(aux[12]);
- half hd(aux[13]);
- half he(aux[14]);
- half hf(aux[15]);
-
- Packet16h result;
- result.x = _mm256_set_epi16(
- hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,
- h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
- return result;
-#endif
-}
-
-template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
- Packet16f af = half2float(a);
- Packet16f bf = half2float(b);
- Packet16f rf = padd(af, bf);
- return float2half(rf);
-}
-
-template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
- Packet16f af = half2float(a);
- Packet16f bf = half2float(b);
- Packet16f rf = pmul(af, bf);
- return float2half(rf);
-}
-
-template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
- Packet16f from_float = half2float(from);
- return half(predux(from_float));
-}
-
-template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
-{
- Packet16h result;
- result.x = _mm256_set_epi16(
- from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
- from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
- from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
- from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)
-{
- EIGEN_ALIGN64 half aux[16];
- pstore(aux, from);
- to[stride*0].x = aux[0].x;
- to[stride*1].x = aux[1].x;
- to[stride*2].x = aux[2].x;
- to[stride*3].x = aux[3].x;
- to[stride*4].x = aux[4].x;
- to[stride*5].x = aux[5].x;
- to[stride*6].x = aux[6].x;
- to[stride*7].x = aux[7].x;
- to[stride*8].x = aux[8].x;
- to[stride*9].x = aux[9].x;
- to[stride*10].x = aux[10].x;
- to[stride*11].x = aux[11].x;
- to[stride*12].x = aux[12].x;
- to[stride*13].x = aux[13].x;
- to[stride*14].x = aux[14].x;
- to[stride*15].x = aux[15].x;
-}
-
-EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<Packet16h,16>& kernel) {
- __m256i a = kernel.packet[0].x;
- __m256i b = kernel.packet[1].x;
- __m256i c = kernel.packet[2].x;
- __m256i d = kernel.packet[3].x;
- __m256i e = kernel.packet[4].x;
- __m256i f = kernel.packet[5].x;
- __m256i g = kernel.packet[6].x;
- __m256i h = kernel.packet[7].x;
- __m256i i = kernel.packet[8].x;
- __m256i j = kernel.packet[9].x;
- __m256i k = kernel.packet[10].x;
- __m256i l = kernel.packet[11].x;
- __m256i m = kernel.packet[12].x;
- __m256i n = kernel.packet[13].x;
- __m256i o = kernel.packet[14].x;
- __m256i p = kernel.packet[15].x;
-
- __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
- __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
- __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
- __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
- __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
- __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
- __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
- __m256i op_07 = _mm256_unpacklo_epi16(o, p);
-
- __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
- __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
- __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
- __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
- __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
- __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
- __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
- __m256i op_8f = _mm256_unpackhi_epi16(o, p);
-
- __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
- __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
- __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
- __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
- __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
- __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
- __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
- __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
-
- __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
- __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
- __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
- __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
- __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
- __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
- __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
- __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
-
- __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
- __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
- __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
- __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
- __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
- __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
- __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
- __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
- __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
- __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
- __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
- __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
- __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
- __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
- __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
- __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
-
- // NOTE: no unpacklo/hi instr in this case, so using permute instr.
- __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
- __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
- __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
- __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
- __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
- __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
- __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
- __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
- __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
- __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
- __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
- __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
- __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
- __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
- __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
- __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
-
- kernel.packet[0].x = a_p_0;
- kernel.packet[1].x = a_p_1;
- kernel.packet[2].x = a_p_2;
- kernel.packet[3].x = a_p_3;
- kernel.packet[4].x = a_p_4;
- kernel.packet[5].x = a_p_5;
- kernel.packet[6].x = a_p_6;
- kernel.packet[7].x = a_p_7;
- kernel.packet[8].x = a_p_8;
- kernel.packet[9].x = a_p_9;
- kernel.packet[10].x = a_p_a;
- kernel.packet[11].x = a_p_b;
- kernel.packet[12].x = a_p_c;
- kernel.packet[13].x = a_p_d;
- kernel.packet[14].x = a_p_e;
- kernel.packet[15].x = a_p_f;
-}
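
The 16x16 transpose above is the classic unpack network: interleave at 16-bit, then 32-bit, then 64-bit granularity, and finish with 128-bit lane permutes (log2(16) = 4 rounds of pairing). The same idea at minimal size, a 4x4 transpose of 32-bit lanes in SSE2, may make the pattern easier to see; transpose4x4 is an illustrative name, not from the diff:

#include <emmintrin.h>

static inline void transpose4x4(__m128i r[4]) {
  __m128i t0 = _mm_unpacklo_epi32(r[0], r[1]);  // a0 b0 a1 b1
  __m128i t1 = _mm_unpacklo_epi32(r[2], r[3]);  // c0 d0 c1 d1
  __m128i t2 = _mm_unpackhi_epi32(r[0], r[1]);  // a2 b2 a3 b3
  __m128i t3 = _mm_unpackhi_epi32(r[2], r[3]);  // c2 d2 c3 d3
  r[0] = _mm_unpacklo_epi64(t0, t1);            // a0 b0 c0 d0
  r[1] = _mm_unpackhi_epi64(t0, t1);            // a1 b1 c1 d1
  r[2] = _mm_unpacklo_epi64(t2, t3);            // a2 b2 c2 d2
  r[3] = _mm_unpackhi_epi64(t2, t3);            // a3 b3 c3 d3
}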
-
-EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<Packet16h,8>& kernel) {
- EIGEN_ALIGN64 half in[8][16];
- pstore<half>(in[0], kernel.packet[0]);
- pstore<half>(in[1], kernel.packet[1]);
- pstore<half>(in[2], kernel.packet[2]);
- pstore<half>(in[3], kernel.packet[3]);
- pstore<half>(in[4], kernel.packet[4]);
- pstore<half>(in[5], kernel.packet[5]);
- pstore<half>(in[6], kernel.packet[6]);
- pstore<half>(in[7], kernel.packet[7]);
-
- EIGEN_ALIGN64 half out[8][16];
-
- for (int i = 0; i < 8; ++i) {
- for (int j = 0; j < 8; ++j) {
- out[i][j] = in[j][2*i];
- }
- for (int j = 0; j < 8; ++j) {
- out[i][j+8] = in[j][2*i+1];
- }
- }
-
- kernel.packet[0] = pload<Packet16h>(out[0]);
- kernel.packet[1] = pload<Packet16h>(out[1]);
- kernel.packet[2] = pload<Packet16h>(out[2]);
- kernel.packet[3] = pload<Packet16h>(out[3]);
- kernel.packet[4] = pload<Packet16h>(out[4]);
- kernel.packet[5] = pload<Packet16h>(out[5]);
- kernel.packet[6] = pload<Packet16h>(out[6]);
- kernel.packet[7] = pload<Packet16h>(out[7]);
-}
-
-EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<Packet16h,4>& kernel) {
- EIGEN_ALIGN64 half in[4][16];
- pstore<half>(in[0], kernel.packet[0]);
- pstore<half>(in[1], kernel.packet[1]);
- pstore<half>(in[2], kernel.packet[2]);
- pstore<half>(in[3], kernel.packet[3]);
-
- EIGEN_ALIGN64 half out[4][16];
-
- for (int i = 0; i < 4; ++i) {
- for (int j = 0; j < 4; ++j) {
- out[i][j] = in[j][4*i];
- }
- for (int j = 0; j < 4; ++j) {
- out[i][j+4] = in[j][4*i+1];
- }
- for (int j = 0; j < 4; ++j) {
- out[i][j+8] = in[j][4*i+2];
- }
- for (int j = 0; j < 4; ++j) {
- out[i][j+12] = in[j][4*i+3];
- }
- }
-
- kernel.packet[0] = pload<Packet16h>(out[0]);
- kernel.packet[1] = pload<Packet16h>(out[1]);
- kernel.packet[2] = pload<Packet16h>(out[2]);
- kernel.packet[3] = pload<Packet16h>(out[3]);
-}
-
-
-#elif defined EIGEN_VECTORIZE_AVX
-
-typedef struct {
- __m128i x;
-} Packet8h;
-
-
-template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
-
-template <>
-struct packet_traits<Eigen::half> : default_packet_traits {
- typedef Packet8h type;
- // There is no half-size packet for Packet8h.
- typedef Packet8h half;
- enum {
- Vectorizable = 1,
- AlignedOnScalar = 1,
- size = 8,
- HasHalfPacket = 0,
- HasAdd = 0,
- HasSub = 0,
- HasMul = 0,
- HasNegate = 0,
- HasAbs = 0,
- HasAbs2 = 0,
- HasMin = 0,
- HasMax = 0,
- HasConj = 0,
- HasSetLinear = 0,
- HasDiv = 0,
- HasSqrt = 0,
- HasRsqrt = 0,
- HasExp = 0,
- HasLog = 0,
- HasBlend = 0
- };
-};
-
-
-template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16}; typedef Packet8h half; };
-
-template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
- Packet8h result;
- result.x = _mm_set1_epi16(from.x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
- return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_extract_epi16(from.x, 0)));
-}
-
-template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
- Packet8h result;
- result.x = _mm_load_si128(reinterpret_cast<const __m128i*>(from));
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
- Packet8h result;
- result.x = _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
- _mm_store_si128(reinterpret_cast<__m128i*>(to), from.x);
-}
-
-template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
- _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from.x);
-}
-
-template<> EIGEN_STRONG_INLINE Packet8h
-ploadquad<Packet8h>(const Eigen::half* from) {
- Packet8h result;
- unsigned short a = from[0].x;
- unsigned short b = from[1].x;
- result.x = _mm_set_epi16(b, b, b, b, a, a, a, a);
- return result;
-}
-
-EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
-#ifdef EIGEN_HAS_FP16_C
- return _mm256_cvtph_ps(a.x);
-#else
- EIGEN_ALIGN32 Eigen::half aux[8];
- pstore(aux, a);
- float f0(aux[0]);
- float f1(aux[1]);
- float f2(aux[2]);
- float f3(aux[3]);
- float f4(aux[4]);
- float f5(aux[5]);
- float f6(aux[6]);
- float f7(aux[7]);
-
- return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
-#endif
-}
-
-EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
-#ifdef EIGEN_HAS_FP16_C
- Packet8h result;
- result.x = _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
- return result;
-#else
- EIGEN_ALIGN32 float aux[8];
- pstore(aux, a);
- Eigen::half h0(aux[0]);
- Eigen::half h1(aux[1]);
- Eigen::half h2(aux[2]);
- Eigen::half h3(aux[3]);
- Eigen::half h4(aux[4]);
- Eigen::half h5(aux[5]);
- Eigen::half h6(aux[6]);
- Eigen::half h7(aux[7]);
-
- Packet8h result;
- result.x = _mm_set_epi16(h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
- return result;
-#endif
-}
-
-template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
-
-template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
- Packet8f af = half2float(a);
- Packet8f bf = half2float(b);
- Packet8f rf = padd(af, bf);
- return float2half(rf);
-}
-
-template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
- Packet8f af = half2float(a);
- Packet8f bf = half2float(b);
- Packet8f rf = pmul(af, bf);
- return float2half(rf);
-}
-
-template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
-{
- Packet8h result;
- result.x = _mm_set_epi16(from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x, from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
-{
- EIGEN_ALIGN32 Eigen::half aux[8];
- pstore(aux, from);
- to[stride*0].x = aux[0].x;
- to[stride*1].x = aux[1].x;
- to[stride*2].x = aux[2].x;
- to[stride*3].x = aux[3].x;
- to[stride*4].x = aux[4].x;
- to[stride*5].x = aux[5].x;
- to[stride*6].x = aux[6].x;
- to[stride*7].x = aux[7].x;
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux_max<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux_min<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux_mul<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<Packet8h,8>& kernel) {
- __m128i a = kernel.packet[0].x;
- __m128i b = kernel.packet[1].x;
- __m128i c = kernel.packet[2].x;
- __m128i d = kernel.packet[3].x;
- __m128i e = kernel.packet[4].x;
- __m128i f = kernel.packet[5].x;
- __m128i g = kernel.packet[6].x;
- __m128i h = kernel.packet[7].x;
-
- __m128i a03b03 = _mm_unpacklo_epi16(a, b);
- __m128i c03d03 = _mm_unpacklo_epi16(c, d);
- __m128i e03f03 = _mm_unpacklo_epi16(e, f);
- __m128i g03h03 = _mm_unpacklo_epi16(g, h);
- __m128i a47b47 = _mm_unpackhi_epi16(a, b);
- __m128i c47d47 = _mm_unpackhi_epi16(c, d);
- __m128i e47f47 = _mm_unpackhi_epi16(e, f);
- __m128i g47h47 = _mm_unpackhi_epi16(g, h);
-
- __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
- __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
- __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
- __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
- __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
- __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
- __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
- __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
-
- __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
- __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
- __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
- __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
- __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
- __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
- __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
- __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
-
- kernel.packet[0].x = a0b0c0d0e0f0g0h0;
- kernel.packet[1].x = a1b1c1d1e1f1g1h1;
- kernel.packet[2].x = a2b2c2d2e2f2g2h2;
- kernel.packet[3].x = a3b3c3d3e3f3g3h3;
- kernel.packet[4].x = a4b4c4d4e4f4g4h4;
- kernel.packet[5].x = a5b5c5d5e5f5g5h5;
- kernel.packet[6].x = a6b6c6d6e6f6g6h6;
- kernel.packet[7].x = a7b7c7d7e7f7g7h7;
-}
-
-EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<Packet8h,4>& kernel) {
- EIGEN_ALIGN32 Eigen::half in[4][8];
- pstore<Eigen::half>(in[0], kernel.packet[0]);
- pstore<Eigen::half>(in[1], kernel.packet[1]);
- pstore<Eigen::half>(in[2], kernel.packet[2]);
- pstore<Eigen::half>(in[3], kernel.packet[3]);
-
- EIGEN_ALIGN32 Eigen::half out[4][8];
-
- for (int i = 0; i < 4; ++i) {
- for (int j = 0; j < 4; ++j) {
- out[i][j] = in[j][2*i];
- }
- for (int j = 0; j < 4; ++j) {
- out[i][j+4] = in[j][2*i+1];
- }
- }
-
- kernel.packet[0] = pload<Packet8h>(out[0]);
- kernel.packet[1] = pload<Packet8h>(out[1]);
- kernel.packet[2] = pload<Packet8h>(out[2]);
- kernel.packet[3] = pload<Packet8h>(out[3]);
-}
-
-
-// Disable the following code since it's broken on too many platforms / compilers.
-//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
-#elif 0
-
-typedef struct {
- __m64 x;
-} Packet4h;
-
-
-template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
-
-template <>
-struct packet_traits<Eigen::half> : default_packet_traits {
- typedef Packet4h type;
- // There is no half-size packet for Packet4h.
- typedef Packet4h half;
- enum {
- Vectorizable = 1,
- AlignedOnScalar = 1,
- size = 4,
- HasHalfPacket = 0,
- HasAdd = 0,
- HasSub = 0,
- HasMul = 0,
- HasNegate = 0,
- HasAbs = 0,
- HasAbs2 = 0,
- HasMin = 0,
- HasMax = 0,
- HasConj = 0,
- HasSetLinear = 0,
- HasDiv = 0,
- HasSqrt = 0,
- HasRsqrt = 0,
- HasExp = 0,
- HasLog = 0,
- HasBlend = 0
- };
-};
-
-
-template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16}; typedef Packet4h half; };
-
-template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
- Packet4h result;
- result.x = _mm_set1_pi16(from.x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
- return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
-}
-
-template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }
-
-template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
- __int64_t a64 = _mm_cvtm64_si64(a.x);
- __int64_t b64 = _mm_cvtm64_si64(b.x);
-
- Eigen::half h[4];
-
- Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
- Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
- h[0] = ha + hb;
- ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
- hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
- h[1] = ha + hb;
- ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
- hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
- h[2] = ha + hb;
- ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
- hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
- h[3] = ha + hb;
- Packet4h result;
- result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
- __int64_t a64 = _mm_cvtm64_si64(a.x);
- __int64_t b64 = _mm_cvtm64_si64(b.x);
-
- Eigen::half h[4];
-
- Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
- Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
- h[0] = ha * hb;
- ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
- hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
- h[1] = ha * hb;
- ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
- hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
- h[2] = ha * hb;
- ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
- hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
- h[3] = ha * hb;
- Packet4h result;
- result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
- Packet4h result;
- result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
- Packet4h result;
- result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
- __int64_t r = _mm_cvtm64_si64(from.x);
- *(reinterpret_cast<__int64_t*>(to)) = r;
-}
-
-template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
- __int64_t r = _mm_cvtm64_si64(from.x);
- *(reinterpret_cast<__int64_t*>(to)) = r;
-}
-
-template<> EIGEN_STRONG_INLINE Packet4h
-ploadquad<Packet4h>(const Eigen::half* from) {
- return pset1<Packet4h>(*from);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
-{
- Packet4h result;
- result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
- return result;
-}
-
-template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
-{
- __int64_t a = _mm_cvtm64_si64(from.x);
- to[stride*0].x = static_cast<unsigned short>(a);
- to[stride*1].x = static_cast<unsigned short>(a >> 16);
- to[stride*2].x = static_cast<unsigned short>(a >> 32);
- to[stride*3].x = static_cast<unsigned short>(a >> 48);
-}
-
-EIGEN_STRONG_INLINE void
-ptranspose(PacketBlock<Packet4h,4>& kernel) {
- __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
- __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
- __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
- __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
-
- kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
- kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
- kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
- kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
-}
-
-#endif
-
-}
-}
-
-#endif // EIGEN_PACKET_MATH_HALF_CUDA_H
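
Throughout the deleted file, the x86 paths do no arithmetic in fp16 at all: they convert packets to fp32, compute there, and convert back. A standalone sketch of that round trip using the F16C intrinsics, assuming an F16C-capable CPU; add_half8 is a hypothetical name:

#include <immintrin.h>

static inline __m128i add_half8(__m128i a, __m128i b) {
  __m256 af = _mm256_cvtph_ps(a);                       // 8 x fp16 -> 8 x fp32
  __m256 bf = _mm256_cvtph_ps(b);
  __m256 rf = _mm256_add_ps(af, bf);                    // compute in fp32
  return _mm256_cvtps_ph(rf, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}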
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/TypeCasting.h
deleted file mode 100644
index 30f870c3d..000000000
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/TypeCasting.h
+++ /dev/null
@@ -1,212 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_TYPE_CASTING_CUDA_H
-#define EIGEN_TYPE_CASTING_CUDA_H
-
-namespace Eigen {
-
-namespace internal {
-
-template<>
-struct scalar_cast_op<float, Eigen::half> {
- EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
- typedef Eigen::half result_type;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
- #if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
- return __float2half(a);
- #else
- return Eigen::half(a);
- #endif
- }
-};
-
-template<>
-struct functor_traits<scalar_cast_op<float, Eigen::half> >
-{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
-
-
-template<>
-struct scalar_cast_op<int, Eigen::half> {
- EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
- typedef Eigen::half result_type;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
- #if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
- return __float2half(static_cast<float>(a));
- #else
- return Eigen::half(static_cast<float>(a));
- #endif
- }
-};
-
-template<>
-struct functor_traits<scalar_cast_op<int, Eigen::half> >
-{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
-
-
-template<>
-struct scalar_cast_op<Eigen::half, float> {
- EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
- typedef float result_type;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
- #if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
- return __half2float(a);
- #else
- return static_cast<float>(a);
- #endif
- }
-};
-
-template<>
-struct functor_traits<scalar_cast_op<Eigen::half, float> >
-{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
-
-
-
-#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300
-
-template <>
-struct type_casting_traits<Eigen::half, float> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 2,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<half2, float4>(const half2& a, const half2& b) {
- float2 r1 = __half22float2(a);
- float2 r2 = __half22float2(b);
- return make_float4(r1.x, r1.y, r2.x, r2.y);
-}
-
-template <>
-struct type_casting_traits<float, Eigen::half> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 2
- };
-};
-
-template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(const float4& a) {
- // Simply discard the second half of the input
- return __floats2half2_rn(a.x, a.y);
-}
-
-#elif defined EIGEN_VECTORIZE_AVX512
-template <>
-struct type_casting_traits<half, float> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {
- return half2float(a);
-}
-
-template <>
-struct type_casting_traits<float, half> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {
- return float2half(a);
-}
-
-#elif defined EIGEN_VECTORIZE_AVX
-
-template <>
-struct type_casting_traits<Eigen::half, float> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {
- return half2float(a);
-}
-
-template <>
-struct type_casting_traits<float, Eigen::half> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {
- return float2half(a);
-}
-
-// Disable the following code since it's broken on too many platforms / compilers.
-//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
-#elif 0
-
-template <>
-struct type_casting_traits<Eigen::half, float> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {
- __int64_t a64 = _mm_cvtm64_si64(a.x);
- Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));
- float f1 = static_cast<float>(h);
- h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
- float f2 = static_cast<float>(h);
- h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
- float f3 = static_cast<float>(h);
- h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
- float f4 = static_cast<float>(h);
- return _mm_set_ps(f4, f3, f2, f1);
-}
-
-template <>
-struct type_casting_traits<float, Eigen::half> {
- enum {
- VectorizedCast = 1,
- SrcCoeffRatio = 1,
- TgtCoeffRatio = 1
- };
-};
-
-template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {
- EIGEN_ALIGN16 float aux[4];
- pstore(aux, a);
- Eigen::half h0(aux[0]);
- Eigen::half h1(aux[1]);
- Eigen::half h2(aux[2]);
- Eigen::half h3(aux[3]);
-
- Packet4h result;
- result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);
- return result;
-}
-
-#endif
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_TYPE_CASTING_CUDA_H
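
In the deleted casts, SrcCoeffRatio/TgtCoeffRatio describe how many source packets feed one target packet: half2 -> float4 consumes two half2 per float4 (SrcCoeffRatio = 2), which is why that pcast takes two arguments, while float4 -> half2 can keep only half of its input. A hedged device-side sketch of the widening direction; widen4 is a hypothetical name, and p is assumed suitably aligned for half2 loads:

#include <cuda_runtime.h>
#include <cuda_fp16.h>

__device__ float4 widen4(const __half* p) {
  half2 lo = *reinterpret_cast<const half2*>(p);      // elements 0,1
  half2 hi = *reinterpret_cast<const half2*>(p + 2);  // elements 2,3
  float2 r1 = __half22float2(lo);
  float2 r2 = __half22float2(hi);
  return make_float4(r1.x, r1.y, r2.x, r2.y);         // mirrors pcast<half2, float4>
}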
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/BFloat16.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/BFloat16.h
new file mode 100644
index 000000000..f21d1a0a3
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/BFloat16.h
@@ -0,0 +1,688 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef EIGEN_BFLOAT16_H
+#define EIGEN_BFLOAT16_H
+
+#define BF16_PACKET_FUNCTION(PACKET_F, PACKET_BF16, METHOD) \
+ template <> \
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED \
+ PACKET_BF16 METHOD<PACKET_BF16>(const PACKET_BF16& _x) { \
+ return F32ToBf16(METHOD<PACKET_F>(Bf16ToF32(_x))); \
+ }
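+
+// For reference, one expansion of the macro above -- written out by hand for
+// psqrt with illustrative packet types Packet8f / Packet8bf -- would read:
+//
+//   template <>
+//   EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+//   Packet8bf psqrt<Packet8bf>(const Packet8bf& _x) {
+//     // Widen to fp32, take the sqrt there, then narrow back to bfloat16.
+//     return F32ToBf16(psqrt<Packet8f>(Bf16ToF32(_x)));
+//   }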
+
+namespace Eigen {
+
+struct bfloat16;
+
+namespace bfloat16_impl {
+
+// Make our own __bfloat16_raw definition.
+struct __bfloat16_raw {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw() : value(0) {}
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw(unsigned short raw) : value(raw) {}
+ unsigned short value;
+};
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(unsigned short value);
+template <bool AssumeArgumentIsNormalOrInfinityOrZero>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff);
+// Forward declarations of template specializations, to avoid Visual C++ 2019 errors, saying:
+// > error C2908: explicit specialization; 'float_to_bfloat16_rtne' has already been instantiated
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<false>(float ff);
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<true>(float ff);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h);
+
+struct bfloat16_base : public __bfloat16_raw {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16_base() {}
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16_base(const __bfloat16_raw& h) : __bfloat16_raw(h) {}
+};
+
+} // namespace bfloat16_impl
+
+// Class definition.
+struct bfloat16 : public bfloat16_impl::bfloat16_base {
+
+ typedef bfloat16_impl::__bfloat16_raw __bfloat16_raw;
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(const __bfloat16_raw& h) : bfloat16_impl::bfloat16_base(h) {}
+
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(bool b)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::raw_uint16_to_bfloat16(b ? 0x3f80 : 0)) {}
+
+ template<class T>
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(T val)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne<internal::is_integral<T>::value>(static_cast<float>(val))) {}
+
+ explicit EIGEN_DEVICE_FUNC bfloat16(float f)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne<false>(f)) {}
+
+  // Following the convention of numpy, converting between complex and
+  // float will lead to loss of the imaginary part.
+ template<typename RealScalar>
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(const std::complex<RealScalar>& val)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne<false>(static_cast<float>(val.real()))) {}
+
+ EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless.
+ return bfloat16_impl::bfloat16_to_float(*this);
+ }
+};
+} // namespace Eigen
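
A short usage sketch of the constructors above: conversions into bfloat16 are explicit, while the conversion out to float is implicit (and lossless). The values below are illustrative assumptions, not from the diff:

#include <complex>
// (assumes BFloat16.h has been pulled in via an Eigen header)

void construction_demo() {
  Eigen::bfloat16 a(3.14f);                          // explicit from float, round-to-nearest-even
  Eigen::bfloat16 b(true);                           // bool maps to 0x3f80, i.e. 1.0
  Eigen::bfloat16 c(std::complex<float>(2.f, 5.f));  // keeps only the real part: 2.0
  float f = a;                                       // implicit widening back to float
  (void)b; (void)c; (void)f;
}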
+
+namespace std {
+template<>
+struct numeric_limits<Eigen::bfloat16> {
+ static const bool is_specialized = true;
+ static const bool is_signed = true;
+ static const bool is_integer = false;
+ static const bool is_exact = false;
+ static const bool has_infinity = true;
+ static const bool has_quiet_NaN = true;
+ static const bool has_signaling_NaN = true;
+ static const float_denorm_style has_denorm = std::denorm_absent;
+ static const bool has_denorm_loss = false;
+ static const std::float_round_style round_style = numeric_limits<float>::round_style;
+ static const bool is_iec559 = false;
+ static const bool is_bounded = true;
+ static const bool is_modulo = false;
+ static const int digits = 8;
+ static const int digits10 = 2;
+ static const int max_digits10 = 4;
+ static const int radix = 2;
+ static const int min_exponent = numeric_limits<float>::min_exponent;
+ static const int min_exponent10 = numeric_limits<float>::min_exponent10;
+ static const int max_exponent = numeric_limits<float>::max_exponent;
+ static const int max_exponent10 = numeric_limits<float>::max_exponent10;
+ static const bool traps = numeric_limits<float>::traps;
+ static const bool tinyness_before = numeric_limits<float>::tinyness_before;
+
+ static Eigen::bfloat16 (min)() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x0080); }
+ static Eigen::bfloat16 lowest() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0xff7f); }
+ static Eigen::bfloat16 (max)() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f7f); }
+ static Eigen::bfloat16 epsilon() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x3c00); }
+  static Eigen::bfloat16 round_error() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x3f00); }
+ static Eigen::bfloat16 infinity() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f80); }
+ static Eigen::bfloat16 quiet_NaN() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7fc0); }
+ static Eigen::bfloat16 signaling_NaN() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f81); }
+ static Eigen::bfloat16 denorm_min() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x0001); }
+};
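
The constants above encode bfloat16's 1-8-7 bit layout. For instance, epsilon (0x3c00) has exponent field 0x78 = 120 and a zero fraction, i.e. 2^(120-127) = 2^-7 = 0.0078125, which is the machine epsilon implied by digits == 8. A small decoder, valid for normal numbers only (denormals, infinities, and NaNs are not handled), to check such values; decode_bf16 is an illustrative helper:

#include <cstdint>
#include <cmath>

static inline float decode_bf16(uint16_t v) {
  int sign = (v >> 15) & 1;
  int exp  = (v >> 7) & 0xFF;
  int frac = v & 0x7F;
  float m  = 1.0f + frac / 128.0f;                   // implicit leading 1 (normal numbers)
  return (sign ? -1.0f : 1.0f) * std::ldexp(m, exp - 127);
}
// decode_bf16(0x3C00) == 0.0078125f (epsilon); decode_bf16(0x7F7F) ~= 3.3895e38f (max)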
+
+// If std::numeric_limits<T> is specialized, should also specialize
+// std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
+// std::numeric_limits<const volatile T>
+// https://stackoverflow.com/a/16519653/
+template<>
+struct numeric_limits<const Eigen::bfloat16> : numeric_limits<Eigen::bfloat16> {};
+template<>
+struct numeric_limits<volatile Eigen::bfloat16> : numeric_limits<Eigen::bfloat16> {};
+template<>
+struct numeric_limits<const volatile Eigen::bfloat16> : numeric_limits<Eigen::bfloat16> {};
+} // namespace std
+
+namespace Eigen {
+
+namespace bfloat16_impl {
+
+// We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler,
+// invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation
+// of the functions, while the latter can only deal with one of them.
+#if !defined(EIGEN_HAS_NATIVE_BF16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for bfloat16 floats
+
+#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
+// We need to provide emulated *host-side* BF16 operators for clang.
+#pragma push_macro("EIGEN_DEVICE_FUNC")
+#undef EIGEN_DEVICE_FUNC
+#if defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_NATIVE_BF16)
+#define EIGEN_DEVICE_FUNC __host__
+#else // both host and device need emulated ops.
+#define EIGEN_DEVICE_FUNC __host__ __device__
+#endif
+#endif
+
+// Definitions for CPUs, mostly working through conversion
+// to/from fp32.
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) + float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const bfloat16& a, const int& b) {
+ return bfloat16(float(a) + static_cast<float>(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const int& a, const bfloat16& b) {
+ return bfloat16(static_cast<float>(a) + float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator * (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) * float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) - float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) / float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a) {
+ bfloat16 result;
+ result.value = a.value ^ 0x8000;
+ return result;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator += (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) + float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator *= (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) * float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator -= (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) - float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator /= (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) / float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator++(bfloat16& a) {
+ a += bfloat16(1);
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator--(bfloat16& a) {
+ a -= bfloat16(1);
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator++(bfloat16& a, int) {
+ bfloat16 original_value = a;
+ ++a;
+ return original_value;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator--(bfloat16& a, int) {
+ bfloat16 original_value = a;
+ --a;
+ return original_value;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const bfloat16& a, const bfloat16& b) {
+ return numext::equal_strict(float(a),float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const bfloat16& a, const bfloat16& b) {
+ return numext::not_equal_strict(float(a), float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const bfloat16& a, const bfloat16& b) {
+ return float(a) < float(b);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const bfloat16& a, const bfloat16& b) {
+ return float(a) <= float(b);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const bfloat16& a, const bfloat16& b) {
+ return float(a) > float(b);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const bfloat16& a, const bfloat16& b) {
+ return float(a) >= float(b);
+}
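
Because every emulated operator above round-trips through float, each operation rounds its fp32 result back to 8 significant bits. A small sketch of the consequence, with values chosen purely for illustration:

void rounding_demo() {
  Eigen::bfloat16 x(1.0f), y(0.001f);
  Eigen::bfloat16 z = x + y;   // evaluated as bfloat16(float(x) + float(y))
  // The bf16 grid near 1.0 steps by 2^-7, so 1.001f rounds back down to 1.0.
  bool eq = (z == x);          // true; the comparison also widens to float
  (void)eq;
}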
+
+#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
+#pragma pop_macro("EIGEN_DEVICE_FUNC")
+#endif
+#endif // Emulate support for bfloat16 floats
+
+// Division by an index. Do it in full float precision to avoid accuracy
+// issues in converting the denominator to bfloat16.
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, Index b) {
+ return bfloat16(static_cast<float>(a) / static_cast<float>(b));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw truncate_to_bfloat16(const float v) {
+ __bfloat16_raw output;
+ if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(v)) {
+ output.value = std::signbit(v) ? 0xFFC0: 0x7FC0;
+ return output;
+ }
+ output.value = static_cast<numext::uint16_t>(numext::bit_cast<numext::uint32_t>(v) >> 16);
+ return output;
+}
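
truncate_to_bfloat16 above simply drops the low 16 bits (after squashing NaNs), while float_to_bfloat16_rtne below adds a rounding bias first. A self-contained sketch of both, with NaN handling omitted, plus one value where they differ; bf16_truncate and bf16_rtne are hypothetical names:

#include <cstdint>
#include <cstring>

static inline uint16_t bf16_truncate(float f) {      // drop the low 16 bits
  uint32_t bits; std::memcpy(&bits, &f, sizeof bits);
  return static_cast<uint16_t>(bits >> 16);
}
static inline uint16_t bf16_rtne(float f) {          // round to nearest even
  uint32_t bits; std::memcpy(&bits, &f, sizeof bits);
  uint32_t lsb = (bits >> 16) & 1;
  bits += 0x7FFFu + lsb;                             // the bias trick documented below
  return static_cast<uint16_t>(bits >> 16);
}
// For 1.01171875f (bits 0x3F818000): bf16_truncate -> 0x3F81 (1.0078125),
// bf16_rtne -> 0x3F82 (1.015625); L = 1 and R = 1, so the tie rounds to even.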
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(numext::uint16_t value) {
+ return __bfloat16_raw(value);
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR numext::uint16_t raw_bfloat16_as_uint16(const __bfloat16_raw& bf) {
+ return bf.value;
+}
+
+// float_to_bfloat16_rtne template specialization that does not make any
+// assumption about the value of its function argument (ff).
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<false>(float ff) {
+#if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16))
+ // Nothing to do here
+#else
+ __bfloat16_raw output;
+
+ if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(ff)) {
+    // If the value is a NaN, squash it to a qNaN with the msb of the fraction
+    // set; this makes sure that after truncation we don't end up with an inf.
+ //
+ // qNaN magic: All exponent bits set + most significant bit of fraction
+ // set.
+ output.value = std::signbit(ff) ? 0xFFC0: 0x7FC0;
+ } else {
+ // Fast rounding algorithm that rounds a half value to nearest even. This
+ // reduces expected error when we convert a large number of floats. Here
+ // is how it works:
+ //
+ // Definitions:
+ // To convert a float 32 to bfloat16, a float 32 can be viewed as 32 bits
+ // with the following tags:
+ //
+ // Sign | Exp (8 bits) | Frac (23 bits)
+ // S EEEEEEEE FFFFFFLRTTTTTTTTTTTTTTT
+ //
+ // S: Sign bit.
+ // E: Exponent bits.
+ // F: First 6 bits of fraction.
+ // L: Least significant bit of resulting bfloat16 if we truncate away the
+ // rest of the float32. This is also the 7th bit of fraction
+ // R: Rounding bit, 8th bit of fraction.
+ // T: Sticky bits, rest of fraction, 15 bits.
+ //
+    // To round half to nearest even, there are three cases where we want to
+    // round down (simply truncate the trailing bits away, i.e. the rounding
+    // bit and the sticky bits) and two cases where we want to round up
+    // (truncate, then add one to the result).
+ //
+    // The fast conversion algorithm simply adds the lsb (L) to 0x7fff (15 bits
+    // of 1s) as the rounding bias, adds that bias to the input, then
+    // truncates the last 16 bits away.
+ //
+ // To understand how it works, we can analyze this algorithm case by case:
+ //
+ // 1. L = 0, R = 0:
+ // Expect: round down, this is less than half value.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 0 = 0x7fff
+    //      - Adding the rounding bias to the input may or may not create a
+    //        carry, depending on whether any of the T bits is set to 1.
+ // - R may be set to 1 if there is a carry.
+ // - L remains 0.
+ // - Note that this case also handles Inf and -Inf, where all fraction
+ // bits, including L, R and Ts are all 0. The output remains Inf after
+ // this algorithm.
+ //
+ // 2. L = 1, R = 0:
+ // Expect: round down, this is less than half value.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 1 = 0x8000
+ // - Adding rounding bias to input doesn't change sticky bits but
+ // adds 1 to rounding bit.
+ // - L remains 1.
+ //
+ // 3. L = 0, R = 1, all of T are 0:
+ // Expect: round down, this is exactly at half, the result is already
+ // even (L=0).
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 0 = 0x7fff
+ // - Adding rounding bias to input sets all sticky bits to 1, but
+ // doesn't create a carry.
+ // - R remains 1.
+ // - L remains 0.
+ //
+ // 4. L = 1, R = 1:
+ // Expect: round up, this is exactly at half, the result needs to be
+ // round to the next even number.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 1 = 0x8000
+ // - Adding rounding bias to input doesn't change sticky bits, but
+ // creates a carry from rounding bit.
+ // - The carry sets L to 0, creates another carry bit and propagates
+ // forward to the F bits.
+ // - If all the F bits are 1, a carry then propagates to the exponent
+ // bits, which then creates the minimum value with the next exponent
+ // value. Note that we won't have the case where exponents are all 1,
+ // since that's either a NaN (handled in the other if condition) or inf
+ // (handled in case 1).
+ //
+ // 5. L = 0, R = 1, any of T is 1:
+ // Expect: round up, this is greater than half.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 0 = 0x7fff
+ // - Adding the rounding bias to the input creates a carry from the sticky
+ // bits, sets the rounding bit to 0, then creates another carry.
+ // - The second carry sets L to 1.
+ //
+ // Examples:
+ //
+ // Exact half value that is already even:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1000000000000000
+ //
+ // This falls into case 3. We truncate the rest of 16 bits and no
+ // carry is created into F and L:
+ //
+ // Output:
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
+ //
+ // Exact half value, round to next even number:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1000000000000000
+ //
+ // This falls into case 4. We create a carry from R and T,
+ // which then propagates into L and F:
+ //
+ // Output:
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
+ //
+ //
+ // Max denormal value round to min normal value:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1111111111111111
+ //
+ // This falls into case 4. We create a carry from R and T,
+ // propagate into L and F, which then propagates into exponent
+ // bits:
+ //
+ // Output:
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
+ //
+ // Max normal value round to Inf:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1111111111111111
+ //
+ // This falls into case 4. We create a carry from R and T,
+ // propagate into L and F, which then propagates into exponent
+ // bits:
+ //
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0
+
+ // At this point, ff must be either a normal float, or +/-infinity.
+ output = float_to_bfloat16_rtne<true>(ff);
+ }
+ return output;
+#endif
+}
+
+// float_to_bfloat16_rtne template specialization that assumes that its function
+// argument (ff) is either a normal floating point number, or +/-infinity, or
+// zero. Used to improve the runtime performance of conversion from an integer
+// type to bfloat16.
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<true>(float ff) {
+#if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16))
+ // Nothing to do here
+#else
+ numext::uint32_t input = numext::bit_cast<numext::uint32_t>(ff);
+ __bfloat16_raw output;
+
+ // Least significant bit of resulting bfloat.
+ numext::uint32_t lsb = (input >> 16) & 1;
+ numext::uint32_t rounding_bias = 0x7fff + lsb;
+ input += rounding_bias;
+ output.value = static_cast<numext::uint16_t>(input >> 16);
+ return output;
+#endif
+}
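+// Worked example of the bias trick above, for illustration, with ff = 1.2f:
+//   input = 0x3F99999A, lsb = (input >> 16) & 1 = 1,
+//   input + 0x7fff + 1 = 0x3F9A199A, and 0x3F9A199A >> 16 = 0x3F9A,
+//   i.e. ~1.203125, the round-to-nearest-even bfloat16 neighbour of 1.2f.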
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h) {
+ return numext::bit_cast<float>(static_cast<numext::uint32_t>(h.value) << 16);
+}
+// --- standard functions ---
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const bfloat16& a) {
+ EIGEN_USING_STD(isinf);
+ return (isinf)(float(a));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const bfloat16& a) {
+ EIGEN_USING_STD(isnan);
+ return (isnan)(float(a));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const bfloat16& a) {
+ return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 abs(const bfloat16& a) {
+ bfloat16 result;
+ result.value = a.value & 0x7FFF;
+ return result;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 exp(const bfloat16& a) {
+ return bfloat16(::expf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 expm1(const bfloat16& a) {
+ return bfloat16(numext::expm1(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log(const bfloat16& a) {
+ return bfloat16(::logf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log1p(const bfloat16& a) {
+ return bfloat16(numext::log1p(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log10(const bfloat16& a) {
+ return bfloat16(::log10f(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log2(const bfloat16& a) {
+ return bfloat16(static_cast<float>(EIGEN_LOG2E) * ::logf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sqrt(const bfloat16& a) {
+ return bfloat16(::sqrtf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 pow(const bfloat16& a, const bfloat16& b) {
+ return bfloat16(::powf(float(a), float(b)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sin(const bfloat16& a) {
+ return bfloat16(::sinf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 cos(const bfloat16& a) {
+ return bfloat16(::cosf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 tan(const bfloat16& a) {
+ return bfloat16(::tanf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 asin(const bfloat16& a) {
+ return bfloat16(::asinf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 acos(const bfloat16& a) {
+ return bfloat16(::acosf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 atan(const bfloat16& a) {
+ return bfloat16(::atanf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sinh(const bfloat16& a) {
+ return bfloat16(::sinhf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 cosh(const bfloat16& a) {
+ return bfloat16(::coshf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 tanh(const bfloat16& a) {
+ return bfloat16(::tanhf(float(a)));
+}
+#if EIGEN_HAS_CXX11_MATH
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 asinh(const bfloat16& a) {
+ return bfloat16(::asinhf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 acosh(const bfloat16& a) {
+ return bfloat16(::acoshf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 atanh(const bfloat16& a) {
+ return bfloat16(::atanhf(float(a)));
+}
+#endif
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 floor(const bfloat16& a) {
+ return bfloat16(::floorf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 ceil(const bfloat16& a) {
+ return bfloat16(::ceilf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 rint(const bfloat16& a) {
+ return bfloat16(::rintf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 round(const bfloat16& a) {
+ return bfloat16(::roundf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmod(const bfloat16& a, const bfloat16& b) {
+ return bfloat16(::fmodf(float(a), float(b)));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (min)(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f2 < f1 ? b : a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (max)(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f1 < f2 ? b : a;
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmin(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return bfloat16(::fminf(f1, f2));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmax(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return bfloat16(::fmaxf(f1, f2));
+}
+
+#ifndef EIGEN_NO_IO
+EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const bfloat16& v) {
+ os << static_cast<float>(v);
+ return os;
+}
+#endif
+
+} // namespace bfloat16_impl
+
+namespace internal {
+
+template<>
+struct random_default_impl<bfloat16, false, false>
+{
+ static inline bfloat16 run(const bfloat16& x, const bfloat16& y)
+ {
+ return x + (y-x) * bfloat16(float(std::rand()) / float(RAND_MAX));
+ }
+ static inline bfloat16 run()
+ {
+ return run(bfloat16(-1.f), bfloat16(1.f));
+ }
+};
+
+template<> struct is_arithmetic<bfloat16> { enum { value = true }; };
+
+} // namespace internal
+
+template<> struct NumTraits<Eigen::bfloat16>
+ : GenericNumTraits<Eigen::bfloat16>
+{
+ enum {
+ IsSigned = true,
+ IsInteger = false,
+ IsComplex = false,
+ RequireInitialization = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 epsilon() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x3c00);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 dummy_precision() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x3D4D); // bfloat16(5e-2f);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 highest() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x7F7F);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 lowest() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0xFF7F);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 infinity() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x7f80);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 quiet_NaN() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x7fc0);
+ }
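+ // For reference, with bfloat16's 1-8-7 bit layout the raw patterns above
+ // decode as: 0x3c00 = 2^-7 (one ulp at 1.0), 0x7F7F ~ 3.39e38 (largest
+ // finite value), 0x7f80 = +infinity, 0x7fc0 = quiet NaN.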
+};
+
+} // namespace Eigen
+
+namespace Eigen {
+namespace numext {
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool (isnan)(const Eigen::bfloat16& h) {
+ return (bfloat16_impl::isnan)(h);
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool (isinf)(const Eigen::bfloat16& h) {
+ return (bfloat16_impl::isinf)(h);
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+bool (isfinite)(const Eigen::bfloat16& h) {
+ return (bfloat16_impl::isfinite)(h);
+}
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bit_cast<Eigen::bfloat16, uint16_t>(const uint16_t& src) {
+ return Eigen::bfloat16(Eigen::bfloat16_impl::raw_uint16_to_bfloat16(src));
+}
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast<uint16_t, Eigen::bfloat16>(const Eigen::bfloat16& src) {
+ return Eigen::bfloat16_impl::raw_bfloat16_as_uint16(src);
+}
+
+} // namespace numext
+} // namespace Eigen
+
+#if EIGEN_HAS_STD_HASH
+namespace std {
+template <>
+struct hash<Eigen::bfloat16> {
+ EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::bfloat16& a) const {
+ return static_cast<std::size_t>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(a));
+ }
+};
+} // namespace std
+#endif
+
+
+#endif // EIGEN_BFLOAT16_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/ConjHelper.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/ConjHelper.h
index 4cfe34e05..53830b5a2 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/ConjHelper.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/ConjHelper.h
@@ -11,19 +11,107 @@
#ifndef EIGEN_ARCH_CONJ_HELPER_H
#define EIGEN_ARCH_CONJ_HELPER_H
-#define EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(PACKET_CPLX, PACKET_REAL) \
- template<> struct conj_helper<PACKET_REAL, PACKET_CPLX, false,false> { \
- EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_REAL& x, const PACKET_CPLX& y, const PACKET_CPLX& c) const \
- { return padd(c, pmul(x,y)); } \
- EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_REAL& x, const PACKET_CPLX& y) const \
- { return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x, y.v)); } \
- }; \
- \
- template<> struct conj_helper<PACKET_CPLX, PACKET_REAL, false,false> { \
- EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_CPLX& x, const PACKET_REAL& y, const PACKET_CPLX& c) const \
- { return padd(c, pmul(x,y)); } \
- EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_CPLX& x, const PACKET_REAL& y) const \
- { return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x.v, y)); } \
+#define EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(PACKET_CPLX, PACKET_REAL) \
+ template <> \
+ struct conj_helper<PACKET_REAL, PACKET_CPLX, false, false> { \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_REAL& x, \
+ const PACKET_CPLX& y, \
+ const PACKET_CPLX& c) const { \
+ return padd(c, this->pmul(x, y)); \
+ } \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_REAL& x, \
+ const PACKET_CPLX& y) const { \
+ return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x, y.v)); \
+ } \
+ }; \
+ \
+ template <> \
+ struct conj_helper<PACKET_CPLX, PACKET_REAL, false, false> { \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_CPLX& x, \
+ const PACKET_REAL& y, \
+ const PACKET_CPLX& c) const { \
+ return padd(c, this->pmul(x, y)); \
+ } \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_CPLX& x, \
+ const PACKET_REAL& y) const { \
+ return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x.v, y)); \
+ } \
};
-#endif // EIGEN_ARCH_CONJ_HELPER_H
+namespace Eigen {
+namespace internal {
+
+template<bool Conjugate> struct conj_if;
+
+template<> struct conj_if<true> {
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const { return numext::conj(x); }
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T pconj(const T& x) const { return internal::pconj(x); }
+};
+
+template<> struct conj_if<false> {
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& operator()(const T& x) const { return x; }
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& pconj(const T& x) const { return x; }
+};
+
+// Generic implementation, assuming scalars since the packet version is
+// specialized below.
+template<typename LhsType, typename RhsType, bool ConjLhs, bool ConjRhs>
+struct conj_helper {
+ typedef typename ScalarBinaryOpTraits<LhsType, RhsType>::ReturnType ResultType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmadd(const LhsType& x, const RhsType& y, const ResultType& c) const
+ { return this->pmul(x, y) + c; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmul(const LhsType& x, const RhsType& y) const
+ { return conj_if<ConjLhs>()(x) * conj_if<ConjRhs>()(y); }
+};
+
+template<typename LhsScalar, typename RhsScalar>
+struct conj_helper<LhsScalar, RhsScalar, true, true> {
+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar>::ReturnType ResultType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmadd(const LhsScalar& x, const RhsScalar& y, const ResultType& c) const
+ { return this->pmul(x, y) + c; }
+
+ // We save a conjugation by using the identity conj(a)*conj(b) = conj(a*b).
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmul(const LhsScalar& x, const RhsScalar& y) const
+ { return numext::conj(x * y); }
+};
+
+// Implementation with equal type, use packet operations.
+template<typename Packet, bool ConjLhs, bool ConjRhs>
+struct conj_helper<Packet, Packet, ConjLhs, ConjRhs>
+{
+ typedef Packet ResultType;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmadd(const Packet& x, const Packet& y, const Packet& c) const
+ { return Eigen::internal::pmadd(conj_if<ConjLhs>().pconj(x), conj_if<ConjRhs>().pconj(y), c); }
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmul(const Packet& x, const Packet& y) const
+ { return Eigen::internal::pmul(conj_if<ConjLhs>().pconj(x), conj_if<ConjRhs>().pconj(y)); }
+};
+
+template<typename Packet>
+struct conj_helper<Packet, Packet, true, true>
+{
+ typedef Packet ResultType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmadd(const Packet& x, const Packet& y, const Packet& c) const
+ { return Eigen::internal::pmadd(pconj(x), pconj(y), c); }
+ // We save a conjugation by using the identity conj(a)*conj(b) = conj(a*b).
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmul(const Packet& x, const Packet& y) const
+ { return pconj(Eigen::internal::pmul(x, y)); }
+};
+
+} // namespace internal
+} // namespace Eigen
+
+#endif // EIGEN_ARCH_CONJ_HELPER_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
new file mode 100644
index 000000000..c9fbaf68b
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
@@ -0,0 +1,1649 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
+// Copyright (C) 2009-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The exp and log functions of this file initially come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+#ifndef EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H
+#define EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H
+
+namespace Eigen {
+namespace internal {
+
+// Creates a Scalar integer type with same bit-width.
+template<typename T> struct make_integer;
+template<> struct make_integer<float> { typedef numext::int32_t type; };
+template<> struct make_integer<double> { typedef numext::int64_t type; };
+template<> struct make_integer<half> { typedef numext::int16_t type; };
+template<> struct make_integer<bfloat16> { typedef numext::int16_t type; };
+
+template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pfrexp_generic_get_biased_exponent(const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ enum { mantissa_bits = numext::numeric_limits<Scalar>::digits - 1};
+ return pcast<PacketI, Packet>(plogical_shift_right<mantissa_bits>(preinterpret<PacketI>(pabs(a))));
+}
+
+// Safely applies frexp, correctly handles denormals.
+// Assumes IEEE floating point format.
+template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pfrexp_generic(const Packet& a, Packet& exponent) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename make_unsigned<typename make_integer<Scalar>::type>::type ScalarUI;
+ enum {
+ TotalBits = sizeof(Scalar) * CHAR_BIT,
+ MantissaBits = numext::numeric_limits<Scalar>::digits - 1,
+ ExponentBits = int(TotalBits) - int(MantissaBits) - 1
+ };
+
+ EIGEN_CONSTEXPR ScalarUI scalar_sign_mantissa_mask =
+ ~(((ScalarUI(1) << int(ExponentBits)) - ScalarUI(1)) << int(MantissaBits)); // ~0x7f800000
+ const Packet sign_mantissa_mask = pset1frombits<Packet>(static_cast<ScalarUI>(scalar_sign_mantissa_mask));
+ const Packet half = pset1<Packet>(Scalar(0.5));
+ const Packet zero = pzero(a);
+ const Packet normal_min = pset1<Packet>((numext::numeric_limits<Scalar>::min)()); // Minimum normal value, 2^-126
+
+ // To handle denormals, normalize by multiplying by 2^(int(MantissaBits)+1).
+ const Packet is_denormal = pcmp_lt(pabs(a), normal_min);
+ EIGEN_CONSTEXPR ScalarUI scalar_normalization_offset = ScalarUI(int(MantissaBits) + 1); // 24
+ // The following cannot be constexpr because bfloat16(uint16_t) is not constexpr.
+ const Scalar scalar_normalization_factor = Scalar(ScalarUI(1) << int(scalar_normalization_offset)); // 2^24
+ const Packet normalization_factor = pset1<Packet>(scalar_normalization_factor);
+ const Packet normalized_a = pselect(is_denormal, pmul(a, normalization_factor), a);
+
+ // Determine exponent offset: -126 if normal, -126-24 if denormal
+ const Scalar scalar_exponent_offset = -Scalar((ScalarUI(1)<<(int(ExponentBits)-1)) - ScalarUI(2)); // -126
+ Packet exponent_offset = pset1<Packet>(scalar_exponent_offset);
+ const Packet normalization_offset = pset1<Packet>(-Scalar(scalar_normalization_offset)); // -24
+ exponent_offset = pselect(is_denormal, padd(exponent_offset, normalization_offset), exponent_offset);
+
+ // Determine exponent and mantissa from normalized_a.
+ exponent = pfrexp_generic_get_biased_exponent(normalized_a);
+ // Zero, Inf and NaN return 'a' unmodified, exponent is zero
+ // (technically the exponent is unspecified for inf/NaN, but GCC/Clang set it to zero)
+ const Scalar scalar_non_finite_exponent = Scalar((ScalarUI(1) << int(ExponentBits)) - ScalarUI(1)); // 255
+ const Packet non_finite_exponent = pset1<Packet>(scalar_non_finite_exponent);
+ const Packet is_zero_or_not_finite = por(pcmp_eq(a, zero), pcmp_eq(exponent, non_finite_exponent));
+ const Packet m = pselect(is_zero_or_not_finite, a, por(pand(normalized_a, sign_mantissa_mask), half));
+ exponent = pselect(is_zero_or_not_finite, zero, padd(exponent, exponent_offset));
+ return m;
+}
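+// Worked example, for illustration, with a float input a = 5.0f = 0x40A00000:
+// the biased exponent is 0x81 = 129 and the offset is -126, so exponent = 3;
+// the mantissa is (bits & ~0x7f800000) | bits(0.5f) = 0x3F200000 = 0.625f,
+// and indeed 5.0 = 0.625 * 2^3.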
+
+// Safely applies ldexp, correctly handles overflows, underflows and denormals.
+// Assumes IEEE floating point format.
+template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pldexp_generic(const Packet& a, const Packet& exponent) {
+ // We want to return a * 2^exponent, allowing for all possible integer
+ // exponents without overflowing or underflowing in intermediate
+ // computations.
+ //
+ // Since 'a' and the output can be denormal, the maximum range of 'exponent'
+ // to consider for a float is:
+ // -255-23 -> 255+23
+ // Below -278 any finite float 'a' will become zero, and above +278 any
+ // finite float will become inf, including when 'a' is the smallest possible
+ // denormal.
+ //
+ // Unfortunately, 2^(278) cannot be represented using either one or two
+ // finite normal floats, so we must split the scale factor into at least
+ // three parts. It turns out to be faster to split 'exponent' into four
+// factors, since [exponent>>2] is much faster to compute than [exponent/3].
+ //
+ // Set e = min(max(exponent, -278), 278);
+ // b = floor(e/4);
+ // out = ((((a * 2^(b)) * 2^(b)) * 2^(b)) * 2^(e-3*b))
+ //
+ // This will avoid any intermediate overflows and correctly handle 0, inf,
+ // NaN cases.
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename unpacket_traits<PacketI>::type ScalarI;
+ enum {
+ TotalBits = sizeof(Scalar) * CHAR_BIT,
+ MantissaBits = numext::numeric_limits<Scalar>::digits - 1,
+ ExponentBits = int(TotalBits) - int(MantissaBits) - 1
+ };
+
+ const Packet max_exponent = pset1<Packet>(Scalar((ScalarI(1)<<int(ExponentBits)) + ScalarI(int(MantissaBits) - 1))); // 278
+ const PacketI bias = pset1<PacketI>((ScalarI(1)<<(int(ExponentBits)-1)) - ScalarI(1)); // 127
+ const PacketI e = pcast<Packet, PacketI>(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
+ PacketI b = parithmetic_shift_right<2>(e); // floor(e/4);
+ Packet c = preinterpret<Packet>(plogical_shift_left<int(MantissaBits)>(padd(b, bias))); // 2^b
+ Packet out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
+ b = psub(psub(psub(e, b), b), b); // e - 3b
+ c = preinterpret<Packet>(plogical_shift_left<int(MantissaBits)>(padd(b, bias))); // 2^(e-3*b)
+ out = pmul(out, c);
+ return out;
+}
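+// Worked example, for illustration, with float and exponent = 278 (the clamp):
+// b = 278 >> 2 = 69, so 'a' is multiplied by 2^69 three times (2^207 in total)
+// and then by 2^(278 - 3*69) = 2^71; each factor is a representable normal
+// float, so no intermediate result overflows.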
+
+// Explicitly multiplies
+// a * (2^e)
+// clamping e to the range
+// [NumTraits<Scalar>::min_exponent()-2, NumTraits<Scalar>::max_exponent()]
+//
+// This is approx 7x faster than pldexp_generic, but will prematurely over/underflow
+// if 2^e doesn't fit into a normal floating-point Scalar.
+//
+// Assumes IEEE floating point format
+template<typename Packet>
+struct pldexp_fast_impl {
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename unpacket_traits<PacketI>::type ScalarI;
+ enum {
+ TotalBits = sizeof(Scalar) * CHAR_BIT,
+ MantissaBits = numext::numeric_limits<Scalar>::digits - 1,
+ ExponentBits = int(TotalBits) - int(MantissaBits) - 1
+ };
+
+ static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+ Packet run(const Packet& a, const Packet& exponent) {
+ const Packet bias = pset1<Packet>(Scalar((ScalarI(1)<<(int(ExponentBits)-1)) - ScalarI(1))); // 127
+ const Packet limit = pset1<Packet>(Scalar((ScalarI(1)<<int(ExponentBits)) - ScalarI(1))); // 255
+ // restrict biased exponent between 0 and 255 for float.
+ const PacketI e = pcast<Packet, PacketI>(pmin(pmax(padd(exponent, bias), pzero(limit)), limit)); // exponent + 127
+ // return a * (2^e)
+ return pmul(a, preinterpret<Packet>(plogical_shift_left<int(MantissaBits)>(e)));
+ }
+};
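+// For illustration with float: exponent = 10 gives a biased exponent of 137,
+// and 137 << 23 = 0x44800000 is the bit pattern of 1024.0f, so run() returns
+// a * 2^10 with a single multiply.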
+
+// Natural or base 2 logarithm.
+// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C = log(2)
+// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
+// be easily approximated by a polynomial centered on m=1 for stability.
+// TODO(gonnet): Further reduce the interval allowing for lower-degree
+// polynomial interpolants -> ... -> profit!
+template <typename Packet, bool base2>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_impl_float(const Packet _x)
+{
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0f);
+ const Packet cst_neg_half = pset1<Packet>(-0.5f);
+ // The smallest non denormalized float number.
+ const Packet cst_min_norm_pos = pset1frombits<Packet>( 0x00800000u);
+ const Packet cst_minus_inf = pset1frombits<Packet>( 0xff800000u);
+ const Packet cst_pos_inf = pset1frombits<Packet>( 0x7f800000u);
+
+ // Polynomial coefficients.
+ const Packet cst_cephes_SQRTHF = pset1<Packet>(0.707106781186547524f);
+ const Packet cst_cephes_log_p0 = pset1<Packet>(7.0376836292E-2f);
+ const Packet cst_cephes_log_p1 = pset1<Packet>(-1.1514610310E-1f);
+ const Packet cst_cephes_log_p2 = pset1<Packet>(1.1676998740E-1f);
+ const Packet cst_cephes_log_p3 = pset1<Packet>(-1.2420140846E-1f);
+ const Packet cst_cephes_log_p4 = pset1<Packet>(+1.4249322787E-1f);
+ const Packet cst_cephes_log_p5 = pset1<Packet>(-1.6668057665E-1f);
+ const Packet cst_cephes_log_p6 = pset1<Packet>(+2.0000714765E-1f);
+ const Packet cst_cephes_log_p7 = pset1<Packet>(-2.4999993993E-1f);
+ const Packet cst_cephes_log_p8 = pset1<Packet>(+3.3333331174E-1f);
+
+ // Truncate input values to the minimum positive normal.
+ x = pmax(x, cst_min_norm_pos);
+
+ Packet e;
+ // extract the significand in the range [0.5,1) and the exponent
+ x = pfrexp(x,e);
+
+ // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
+ // and shift by -1. The values are then centered around 0, which improves
+ // the stability of the polynomial evaluation.
+ // if( x < SQRTHF ) {
+ // e -= 1;
+ // x = x + x - 1.0;
+ // } else { x = x - 1.0; }
+ Packet mask = pcmp_lt(x, cst_cephes_SQRTHF);
+ Packet tmp = pand(x, mask);
+ x = psub(x, cst_1);
+ e = psub(e, pand(cst_1, mask));
+ x = padd(x, tmp);
+
+ Packet x2 = pmul(x, x);
+ Packet x3 = pmul(x2, x);
+
+ // Evaluate the polynomial approximant of degree 8 in three parts, probably
+ // to improve instruction-level parallelism.
+ Packet y, y1, y2;
+ y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1);
+ y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4);
+ y2 = pmadd(cst_cephes_log_p6, x, cst_cephes_log_p7);
+ y = pmadd(y, x, cst_cephes_log_p2);
+ y1 = pmadd(y1, x, cst_cephes_log_p5);
+ y2 = pmadd(y2, x, cst_cephes_log_p8);
+ y = pmadd(y, x3, y1);
+ y = pmadd(y, x3, y2);
+ y = pmul(y, x3);
+
+ y = pmadd(cst_neg_half, x2, y);
+ x = padd(x, y);
+
+ // Add the logarithm of the exponent back to the result of the interpolation.
+ if (base2) {
+ const Packet cst_log2e = pset1<Packet>(static_cast<float>(EIGEN_LOG2E));
+ x = pmadd(x, cst_log2e, e);
+ } else {
+ const Packet cst_ln2 = pset1<Packet>(static_cast<float>(EIGEN_LN2));
+ x = pmadd(e, cst_ln2, x);
+ }
+
+ Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x));
+ Packet iszero_mask = pcmp_eq(_x,pzero(_x));
+ Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf);
+ // Filter out invalid inputs, i.e.:
+ // - negative arg will be NAN
+ // - 0 will be -INF
+ // - +INF will be +INF
+ return pselect(iszero_mask, cst_minus_inf,
+ por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask));
+}
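+// Worked example of the reduction, for illustration, with _x = 8.0f:
+// pfrexp yields m = 0.5 and e = 4; since 0.5 < sqrt(1/2), e becomes 3 and m
+// is doubled, so the polynomial is evaluated at x = 2*0.5 - 1 = 0 and the
+// result is exactly e*ln(2) = 3*ln(2) = log(8).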
+
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_float(const Packet _x)
+{
+ return plog_impl_float<Packet, /* base2 */ false>(_x);
+}
+
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog2_float(const Packet _x)
+{
+ return plog_impl_float<Packet, /* base2 */ true>(_x);
+}
+
+/* Returns the base e (2.718...) or base 2 logarithm of x.
+ * The argument is separated into its exponent and fractional parts.
+ * The logarithm of the fraction in the interval [sqrt(1/2), sqrt(2)],
+ * is approximated by
+ *
+ * log(1+x) = x - 0.5 x**2 + x**3 P(x)/Q(x).
+ *
+ * for more detail see: http://www.netlib.org/cephes/
+ */
+template <typename Packet, bool base2>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_impl_double(const Packet _x)
+{
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0);
+ const Packet cst_neg_half = pset1<Packet>(-0.5);
+ // The smallest non denormalized double.
+ const Packet cst_min_norm_pos = pset1frombits<Packet>( static_cast<uint64_t>(0x0010000000000000ull));
+ const Packet cst_minus_inf = pset1frombits<Packet>( static_cast<uint64_t>(0xfff0000000000000ull));
+ const Packet cst_pos_inf = pset1frombits<Packet>( static_cast<uint64_t>(0x7ff0000000000000ull));
+
+
+ // Polynomial Coefficients for log(1+x) = x - x**2/2 + x**3 P(x)/Q(x)
+ // 1/sqrt(2) <= x < sqrt(2)
+ const Packet cst_cephes_SQRTHF = pset1<Packet>(0.70710678118654752440E0);
+ const Packet cst_cephes_log_p0 = pset1<Packet>(1.01875663804580931796E-4);
+ const Packet cst_cephes_log_p1 = pset1<Packet>(4.97494994976747001425E-1);
+ const Packet cst_cephes_log_p2 = pset1<Packet>(4.70579119878881725854E0);
+ const Packet cst_cephes_log_p3 = pset1<Packet>(1.44989225341610930846E1);
+ const Packet cst_cephes_log_p4 = pset1<Packet>(1.79368678507819816313E1);
+ const Packet cst_cephes_log_p5 = pset1<Packet>(7.70838733755885391666E0);
+
+ const Packet cst_cephes_log_q0 = pset1<Packet>(1.0);
+ const Packet cst_cephes_log_q1 = pset1<Packet>(1.12873587189167450590E1);
+ const Packet cst_cephes_log_q2 = pset1<Packet>(4.52279145837532221105E1);
+ const Packet cst_cephes_log_q3 = pset1<Packet>(8.29875266912776603211E1);
+ const Packet cst_cephes_log_q4 = pset1<Packet>(7.11544750618563894466E1);
+ const Packet cst_cephes_log_q5 = pset1<Packet>(2.31251620126765340583E1);
+
+ // Truncate input values to the minimum positive normal.
+ x = pmax(x, cst_min_norm_pos);
+
+ Packet e;
+ // extract the significand in the range [0.5,1) and the exponent
+ x = pfrexp(x,e);
+
+ // Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
+ // and shift by -1. The values are then centered around 0, which improves
+ // the stability of the polynomial evaluation.
+ // if( x < SQRTHF ) {
+ // e -= 1;
+ // x = x + x - 1.0;
+ // } else { x = x - 1.0; }
+ Packet mask = pcmp_lt(x, cst_cephes_SQRTHF);
+ Packet tmp = pand(x, mask);
+ x = psub(x, cst_1);
+ e = psub(e, pand(cst_1, mask));
+ x = padd(x, tmp);
+
+ Packet x2 = pmul(x, x);
+ Packet x3 = pmul(x2, x);
+
+ // Evaluate the polynomial approximant in two parts, probably to improve
+ // instruction-level parallelism:
+ // y = x - 0.5*x^2 + x^3 * polevl(x, P, 5) / p1evl(x, Q, 5)
+ Packet y, y1, y_;
+ y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1);
+ y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4);
+ y = pmadd(y, x, cst_cephes_log_p2);
+ y1 = pmadd(y1, x, cst_cephes_log_p5);
+ y_ = pmadd(y, x3, y1);
+
+ y = pmadd(cst_cephes_log_q0, x, cst_cephes_log_q1);
+ y1 = pmadd(cst_cephes_log_q3, x, cst_cephes_log_q4);
+ y = pmadd(y, x, cst_cephes_log_q2);
+ y1 = pmadd(y1, x, cst_cephes_log_q5);
+ y = pmadd(y, x3, y1);
+
+ y_ = pmul(y_, x3);
+ y = pdiv(y_, y);
+
+ y = pmadd(cst_neg_half, x2, y);
+ x = padd(x, y);
+
+ // Add the logarithm of the exponent back to the result of the interpolation.
+ if (base2) {
+ const Packet cst_log2e = pset1<Packet>(static_cast<double>(EIGEN_LOG2E));
+ x = pmadd(x, cst_log2e, e);
+ } else {
+ const Packet cst_ln2 = pset1<Packet>(static_cast<double>(EIGEN_LN2));
+ x = pmadd(e, cst_ln2, x);
+ }
+
+ Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x));
+ Packet iszero_mask = pcmp_eq(_x,pzero(_x));
+ Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf);
+ // Filter out invalid inputs, i.e.:
+ // - negative arg will be NAN
+ // - 0 will be -INF
+ // - +INF will be +INF
+ return pselect(iszero_mask, cst_minus_inf,
+ por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask));
+}
+
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_double(const Packet _x)
+{
+ return plog_impl_double<Packet, /* base2 */ false>(_x);
+}
+
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog2_double(const Packet _x)
+{
+ return plog_impl_double<Packet, /* base2 */ true>(_x);
+}
+
+/** \internal \returns log(1 + x) computed using W. Kahan's formula.
+ See: http://www.plunk.org/~hatch/rightway.php
+ */
+template<typename Packet>
+Packet generic_plog1p(const Packet& x)
+{
+ typedef typename unpacket_traits<Packet>::type ScalarType;
+ const Packet one = pset1<Packet>(ScalarType(1));
+ Packet xp1 = padd(x, one);
+ Packet small_mask = pcmp_eq(xp1, one);
+ Packet log1 = plog(xp1);
+ Packet inf_mask = pcmp_eq(xp1, log1);
+ Packet log_large = pmul(x, pdiv(log1, psub(xp1, one)));
+ return pselect(por(small_mask, inf_mask), x, log_large);
+}
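+// Why this works, briefly: with u = fl(1+x), the factor log(u)/(u - 1)
+// varies smoothly in u, so multiplying it by the exact x cancels the rounding
+// error committed when forming u. For tiny x (u == 1) the result is x itself.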
+
+/** \internal \returns exp(x)-1 computed using W. Kahan's formula.
+ See: http://www.plunk.org/~hatch/rightway.php
+ */
+template<typename Packet>
+Packet generic_expm1(const Packet& x)
+{
+ typedef typename unpacket_traits<Packet>::type ScalarType;
+ const Packet one = pset1<Packet>(ScalarType(1));
+ const Packet neg_one = pset1<Packet>(ScalarType(-1));
+ Packet u = pexp(x);
+ Packet one_mask = pcmp_eq(u, one);
+ Packet u_minus_one = psub(u, one);
+ Packet neg_one_mask = pcmp_eq(u_minus_one, neg_one);
+ Packet logu = plog(u);
+ // The following comparison is to catch the case where
+ // exp(x) = +inf. It is written in this way to avoid having
+ // to form the constant +inf, which depends on the packet
+ // type.
+ Packet pos_inf_mask = pcmp_eq(logu, u);
+ Packet expm1 = pmul(u_minus_one, pdiv(x, logu));
+ expm1 = pselect(pos_inf_mask, u, expm1);
+ return pselect(one_mask,
+ x,
+ pselect(neg_one_mask,
+ neg_one,
+ expm1));
+}
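+// Why this works, briefly: with u = fl(exp(x)), the factor (u - 1)/log(u)
+// varies smoothly in u, so multiplying it by the exact x cancels the rounding
+// error in u. The u == 1 case returns x (expm1(x) ~ x for tiny x), and
+// u - 1 == -1 means exp(x) underflowed, so the limit value -1 is returned.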
+
+
+// Exponential function. Works by writing "x = m*log(2) + r" where
+// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
+// "exp(x) = 2^m*exp(r)", where r is in the range [-log(2)/2, log(2)/2].
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_float(const Packet _x)
+{
+ const Packet cst_1 = pset1<Packet>(1.0f);
+ const Packet cst_half = pset1<Packet>(0.5f);
+ const Packet cst_exp_hi = pset1<Packet>( 88.723f);
+ const Packet cst_exp_lo = pset1<Packet>(-88.723f);
+
+ const Packet cst_cephes_LOG2EF = pset1<Packet>(1.44269504088896341f);
+ const Packet cst_cephes_exp_p0 = pset1<Packet>(1.9875691500E-4f);
+ const Packet cst_cephes_exp_p1 = pset1<Packet>(1.3981999507E-3f);
+ const Packet cst_cephes_exp_p2 = pset1<Packet>(8.3334519073E-3f);
+ const Packet cst_cephes_exp_p3 = pset1<Packet>(4.1665795894E-2f);
+ const Packet cst_cephes_exp_p4 = pset1<Packet>(1.6666665459E-1f);
+ const Packet cst_cephes_exp_p5 = pset1<Packet>(5.0000001201E-1f);
+
+ // Clamp x.
+ Packet x = pmax(pmin(_x, cst_exp_hi), cst_exp_lo);
+
+ // Express exp(x) as exp(m*ln(2) + r), start by extracting
+ // m = floor(x/ln(2) + 0.5).
+ Packet m = pfloor(pmadd(x, cst_cephes_LOG2EF, cst_half));
+
+ // Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
+ // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
+ // truncation errors.
+ const Packet cst_cephes_exp_C1 = pset1<Packet>(-0.693359375f);
+ const Packet cst_cephes_exp_C2 = pset1<Packet>(2.12194440e-4f);
+ Packet r = pmadd(m, cst_cephes_exp_C1, x);
+ r = pmadd(m, cst_cephes_exp_C2, r);
+
+ Packet r2 = pmul(r, r);
+ Packet r3 = pmul(r2, r);
+
+ // Evaluate the polynomial approximant, split to benefit from instruction-level parallelism.
+ Packet y, y1, y2;
+ y = pmadd(cst_cephes_exp_p0, r, cst_cephes_exp_p1);
+ y1 = pmadd(cst_cephes_exp_p3, r, cst_cephes_exp_p4);
+ y2 = padd(r, cst_1);
+ y = pmadd(y, r, cst_cephes_exp_p2);
+ y1 = pmadd(y1, r, cst_cephes_exp_p5);
+ y = pmadd(y, r3, y1);
+ y = pmadd(y, r2, y2);
+
+ // Return 2^m * exp(r).
+ // TODO: replace pldexp with faster implementation since y in [-1, 1).
+ return pmax(pldexp(y,m), _x);
+}
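+// Worked example, for illustration, with _x = ln(2) ~ 0.693147f:
+// m = floor(0.693147 * 1.442695 + 0.5) = 1 and r = x - m*ln(2) ~ 0, so the
+// polynomial evaluates to ~1 and pldexp scales it to 2^1 * 1 = 2 = exp(ln 2).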
+
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_double(const Packet _x)
+{
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0);
+ const Packet cst_2 = pset1<Packet>(2.0);
+ const Packet cst_half = pset1<Packet>(0.5);
+
+ const Packet cst_exp_hi = pset1<Packet>(709.784);
+ const Packet cst_exp_lo = pset1<Packet>(-709.784);
+
+ const Packet cst_cephes_LOG2EF = pset1<Packet>(1.4426950408889634073599);
+ const Packet cst_cephes_exp_p0 = pset1<Packet>(1.26177193074810590878e-4);
+ const Packet cst_cephes_exp_p1 = pset1<Packet>(3.02994407707441961300e-2);
+ const Packet cst_cephes_exp_p2 = pset1<Packet>(9.99999999999999999910e-1);
+ const Packet cst_cephes_exp_q0 = pset1<Packet>(3.00198505138664455042e-6);
+ const Packet cst_cephes_exp_q1 = pset1<Packet>(2.52448340349684104192e-3);
+ const Packet cst_cephes_exp_q2 = pset1<Packet>(2.27265548208155028766e-1);
+ const Packet cst_cephes_exp_q3 = pset1<Packet>(2.00000000000000000009e0);
+ const Packet cst_cephes_exp_C1 = pset1<Packet>(0.693145751953125);
+ const Packet cst_cephes_exp_C2 = pset1<Packet>(1.42860682030941723212e-6);
+
+ Packet tmp, fx;
+
+ // clamp x
+ x = pmax(pmin(x, cst_exp_hi), cst_exp_lo);
+ // Express exp(x) as exp(g + n*log(2)).
+ fx = pmadd(cst_cephes_LOG2EF, x, cst_half);
+
+ // Get the integer modulus of log(2), i.e. the "n" described above.
+ fx = pfloor(fx);
+
+ // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
+ // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
+ // digits right.
+ tmp = pmul(fx, cst_cephes_exp_C1);
+ Packet z = pmul(fx, cst_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ Packet x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial of the rational interpolant.
+ Packet px = cst_cephes_exp_p0;
+ px = pmadd(px, x2, cst_cephes_exp_p1);
+ px = pmadd(px, x2, cst_cephes_exp_p2);
+ px = pmul(px, x);
+
+ // Evaluate the denominator polynomial of the rational interpolant.
+ Packet qx = cst_cephes_exp_q0;
+ qx = pmadd(qx, x2, cst_cephes_exp_q1);
+ qx = pmadd(qx, x2, cst_cephes_exp_q2);
+ qx = pmadd(qx, x2, cst_cephes_exp_q3);
+
+ // I don't really get this bit, copied from the SSE2 routines, so...
+ // TODO(gonnet): Figure out what is going on here, perhaps find a better
+ // rational interpolant?
+ x = pdiv(px, psub(qx, px));
+ x = pmadd(cst_2, x, cst_1);
+
+ // Construct the result 2^n * exp(g) = e * x. The max is used to catch
+ // non-finite values in the input.
+ // TODO: replace pldexp with faster implementation since x in [-1, 1).
+ return pmax(pldexp(x,fx), _x);
+}
+
+// The following code is inspired by the following stack-overflow answer:
+// https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751
+// It has been largely optimized:
+// - Bypassing calls to frexp.
+// - Aligned loads of the required 96 bits of 2/pi. This is accomplished by
+// (1) balancing the mantissa and exponent so that the required bits of 2/pi
+// are aligned on 8 bits, and (2) replicating the storage of the bits of 2/pi.
+// - Avoiding a branch in rounding and extraction of the remaining fractional part.
+// Overall, I measured a speedup higher than 2x on x86-64.
+inline float trig_reduce_huge (float xf, int *quadrant)
+{
+ using Eigen::numext::int32_t;
+ using Eigen::numext::uint32_t;
+ using Eigen::numext::int64_t;
+ using Eigen::numext::uint64_t;
+
+ const double pio2_62 = 3.4061215800865545e-19; // pi/2 * 2^-62
+ const uint64_t zero_dot_five = uint64_t(1) << 61; // 0.5 in 2.62-bit fixed-point format
+
+ // 192 bits of 2/pi for Payne-Hanek reduction
+ // Bits are introduced by packet of 8 to enable aligned reads.
+ static const uint32_t two_over_pi [] =
+ {
+ 0x00000028, 0x000028be, 0x0028be60, 0x28be60db,
+ 0xbe60db93, 0x60db9391, 0xdb939105, 0x9391054a,
+ 0x91054a7f, 0x054a7f09, 0x4a7f09d5, 0x7f09d5f4,
+ 0x09d5f47d, 0xd5f47d4d, 0xf47d4d37, 0x7d4d3770,
+ 0x4d377036, 0x377036d8, 0x7036d8a5, 0x36d8a566,
+ 0xd8a5664f, 0xa5664f10, 0x664f10e4, 0x4f10e410,
+ 0x10e41000, 0xe4100000
+ };
+
+ uint32_t xi = numext::bit_cast<uint32_t>(xf);
+ // Below, -118 = -126 + 8.
+ // -126 is to get the exponent,
+ // +8 is to enable alignment of 2/pi's bits on 8 bits.
+ // This is possible because the fractional part of x has only 24 meaningful bits.
+ uint32_t e = (xi >> 23) - 118;
+ // Extract the mantissa and shift it to align it wrt the exponent
+ xi = ((xi & 0x007fffffu)| 0x00800000u) << (e & 0x7);
+
+ uint32_t i = e >> 3;
+ uint32_t twoopi_1 = two_over_pi[i-1];
+ uint32_t twoopi_2 = two_over_pi[i+3];
+ uint32_t twoopi_3 = two_over_pi[i+7];
+
+ // Compute x * 2/pi in 2.62-bit fixed-point format.
+ uint64_t p;
+ p = uint64_t(xi) * twoopi_3;
+ p = uint64_t(xi) * twoopi_2 + (p >> 32);
+ p = (uint64_t(xi * twoopi_1) << 32) + p;
+
+ // Round to nearest: add 0.5 and extract integral part.
+ uint64_t q = (p + zero_dot_five) >> 62;
+ *quadrant = int(q);
+ // Now it remains to compute "r = x - q*pi/2" with high accuracy,
+ // since we have p=x/(pi/2) with high accuracy, we can more efficiently compute r as:
+ // r = (p-q)*pi/2,
+ // where the product can be carried out with sufficient accuracy using double precision.
+ p -= q<<62;
+ return float(double(int64_t(p)) * pio2_62);
+}
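+// For illustration: xf = 1.0e6f gives p = xf * (2/pi) ~ 636619.77, so the
+// returned quadrant is 636620 (low two bits 0) and the remainder is
+// r = (p - q) * pi/2 ~ -0.358, the signed distance to the nearest multiple
+// of pi/2.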
+
+template<bool ComputeSine,typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+#if EIGEN_GNUC_AT_LEAST(4,4) && EIGEN_COMP_GNUC_STRICT
+__attribute__((optimize("-fno-unsafe-math-optimizations")))
+#endif
+Packet psincos_float(const Packet& _x)
+{
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+
+ const Packet cst_2oPI = pset1<Packet>(0.636619746685028076171875f); // 2/PI
+ const Packet cst_rounding_magic = pset1<Packet>(12582912); // 2^23 for rounding
+ const PacketI csti_1 = pset1<PacketI>(1);
+ const Packet cst_sign_mask = pset1frombits<Packet>(0x80000000u);
+
+ Packet x = pabs(_x);
+
+ // Scale x by 2/Pi to find x's octant.
+ Packet y = pmul(x, cst_2oPI);
+
+ // Rounding trick:
+ Packet y_round = padd(y, cst_rounding_magic);
+ EIGEN_OPTIMIZATION_BARRIER(y_round)
+ PacketI y_int = preinterpret<PacketI>(y_round); // last 23 bits represent the integer (if abs(x)<2^24)
+ y = psub(y_round, cst_rounding_magic); // nearest integer to x*(2/pi)
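+ // The magic constant is 1.5*2^23: for |y| < 2^22, y + magic lies in
+ // [2^23, 2^24) where consecutive floats are exactly 1 apart, so the add
+ // rounds y to the nearest integer, held in the low mantissa bits.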
+
+ // Reduce x by y octants to get: -Pi/4 <= x <= +Pi/4
+ // using "Extended precision modular arithmetic"
+ #if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD)
+ // This version requires true FMA for high accuracy
+ // It provides a max error of 1 ULP up to huge_th (with absolute_error < 5.9605e-08):
+ const float huge_th = ComputeSine ? 117435.992f : 71476.0625f;
+ x = pmadd(y, pset1<Packet>(-1.57079601287841796875f), x);
+ x = pmadd(y, pset1<Packet>(-3.1391647326017846353352069854736328125e-07f), x);
+ x = pmadd(y, pset1<Packet>(-5.390302529957764765544681040410068817436695098876953125e-15f), x);
+ #else
+ // Without true FMA, the previous set of coefficients maintains 1 ULP accuracy
+ // up to x<15.7 (for sin), but accuracy is immediately lost for x>15.7.
+ // We thus use one more iteration to maintain 2 ULPs up to reasonably large inputs.
+
+ // The following set of coefficients maintains 1 ULP up to 9.43 and 14.16
+ // for sin and cos respectively, and 2 ULP up to:
+ const float huge_th = ComputeSine ? 25966.f : 18838.f;
+ x = pmadd(y, pset1<Packet>(-1.5703125), x); // = 0xbfc90000
+ EIGEN_OPTIMIZATION_BARRIER(x)
+ x = pmadd(y, pset1<Packet>(-0.000483989715576171875), x); // = 0xb9fdc000
+ EIGEN_OPTIMIZATION_BARRIER(x)
+ x = pmadd(y, pset1<Packet>(1.62865035235881805419921875e-07), x); // = 0x342ee000
+ x = pmadd(y, pset1<Packet>(5.5644315544167710640977020375430583953857421875e-11), x); // = 0x2e74b9ee
+
+ // For the record, the following set of coefficients maintain 2ULP up
+ // to a slightly larger range:
+ // const float huge_th = ComputeSine ? 51981.f : 39086.125f;
+ // but it slightly fails to maintain 1ULP for two values of sin below pi.
+ // x = pmadd(y, pset1<Packet>(-3.140625/2.), x);
+ // x = pmadd(y, pset1<Packet>(-0.00048351287841796875), x);
+ // x = pmadd(y, pset1<Packet>(-3.13855707645416259765625e-07), x);
+ // x = pmadd(y, pset1<Packet>(-6.0771006282767103812147979624569416046142578125e-11), x);
+
+ // For the record, with only 3 iterations it is possible to maintain
+ // 1 ULP up to 3PI (maybe more) and 2ULP up to 255.
+ // The coefficients are: 0xbfc90f80, 0xb7354480, 0x2e74b9ee
+ #endif
+
+ if(predux_any(pcmp_le(pset1<Packet>(huge_th),pabs(_x))))
+ {
+ const int PacketSize = unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float vals[PacketSize];
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float x_cpy[PacketSize];
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) int y_int2[PacketSize];
+ pstoreu(vals, pabs(_x));
+ pstoreu(x_cpy, x);
+ pstoreu(y_int2, y_int);
+ for(int k=0; k<PacketSize;++k)
+ {
+ float val = vals[k];
+ if(val>=huge_th && (numext::isfinite)(val))
+ x_cpy[k] = trig_reduce_huge(val,&y_int2[k]);
+ }
+ x = ploadu<Packet>(x_cpy);
+ y_int = ploadu<PacketI>(y_int2);
+ }
+
+ // Compute the sign to apply to the polynomial.
+ // sin: sign = second_bit(y_int) xor signbit(_x)
+ // cos: sign = second_bit(y_int+1)
+ Packet sign_bit = ComputeSine ? pxor(_x, preinterpret<Packet>(plogical_shift_left<30>(y_int)))
+ : preinterpret<Packet>(plogical_shift_left<30>(padd(y_int,csti_1)));
+ sign_bit = pand(sign_bit, cst_sign_mask); // clear all but left most bit
+
+ // Get the polynomial selection mask from the lowest bit of y_int.
+ // We'll calculate both (sin and cos) polynomials and then select from the two.
+ Packet poly_mask = preinterpret<Packet>(pcmp_eq(pand(y_int, csti_1), pzero(y_int)));
+
+ Packet x2 = pmul(x,x);
+
+ // Evaluate the cos(x) polynomial. (-Pi/4 <= x <= Pi/4)
+ Packet y1 = pset1<Packet>(2.4372266125283204019069671630859375e-05f);
+ y1 = pmadd(y1, x2, pset1<Packet>(-0.00138865201734006404876708984375f ));
+ y1 = pmadd(y1, x2, pset1<Packet>(0.041666619479656219482421875f ));
+ y1 = pmadd(y1, x2, pset1<Packet>(-0.5f));
+ y1 = pmadd(y1, x2, pset1<Packet>(1.f));
+
+ // Evaluate the sin(x) polynomial. (-Pi/4 <= x <= Pi/4)
+ // octave/matlab code to compute those coefficients:
+ // x = (0:0.0001:pi/4)';
+ // A = [x.^3 x.^5 x.^7];
+ // w = ((1.-(x/(pi/4)).^2).^5)*2000+1; # weights trading relative accuracy
+ // c = (A'*diag(w)*A)\(A'*diag(w)*(sin(x)-x)); # weighted LS, linear coeff forced to 1
+ // printf('%.64f\n %.64f\n%.64f\n', c(3), c(2), c(1))
+ //
+ Packet y2 = pset1<Packet>(-0.0001959234114083702898469196984621021329076029360294342041015625f);
+ y2 = pmadd(y2, x2, pset1<Packet>( 0.0083326873655616851693794799871284340042620897293090820312500000f));
+ y2 = pmadd(y2, x2, pset1<Packet>(-0.1666666203982298255503735617821803316473960876464843750000000000f));
+ y2 = pmul(y2, x2);
+ y2 = pmadd(y2, x, x);
+
+ // Select the correct result from the two polynomials.
+ y = ComputeSine ? pselect(poly_mask,y2,y1)
+ : pselect(poly_mask,y1,y2);
+
+ // Update the sign and filter huge inputs
+ return pxor(y, sign_bit);
+}
+
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psin_float(const Packet& x)
+{
+ return psincos_float<true>(x);
+}
+
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pcos_float(const Packet& x)
+{
+ return psincos_float<false>(x);
+}
+
+
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psqrt_complex(const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename Scalar::value_type RealScalar;
+ typedef typename unpacket_traits<Packet>::as_real RealPacket;
+
+ // Computes the principal sqrt of the complex numbers in the input.
+ //
+ // For example, for packets containing 2 complex numbers stored in interleaved format
+ // a = [a0, a1] = [x0, y0, x1, y1],
+ // where x0 = real(a0), y0 = imag(a0) etc., this function returns
+ // b = [b0, b1] = [u0, v0, u1, v1],
+ // such that b0^2 = a0, b1^2 = a1.
+ //
+ // To derive the formula for the complex square roots, let's consider the equation for
+ // a single complex square root of the number x + i*y. We want to find real numbers
+ // u and v such that
+ // (u + i*v)^2 = x + i*y <=>
+ // u^2 - v^2 + i*2*u*v = x + i*y.
+ // By equating the real and imaginary parts we get:
+ // u^2 - v^2 = x
+ // 2*u*v = y.
+ //
+ // For x >= 0, this has the numerically stable solution
+ // u = sqrt(0.5 * (x + sqrt(x^2 + y^2)))
+ // v = 0.5 * (y / u)
+ // and for x < 0,
+ // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2)))
+ // u = 0.5 * (y / v)
+ //
+ // To avoid unnecessary over- and underflow, we compute sqrt(x^2 + y^2) as
+ // l = max(|x|, |y|) * sqrt(1 + (min(|x|, |y|) / max(|x|, |y|))^2).
+
+ // In the following, without loss of generality, we have annotated the code, assuming
+ // that the input is a packet of 2 complex numbers.
+ //
+ // Step 1. Compute l = [l0, l0, l1, l1], where
+ // l0 = sqrt(x0^2 + y0^2), l1 = sqrt(x1^2 + y1^2)
+ // To avoid over- and underflow, we use the stable formula for each hypotenuse
+ // l0 = (min0 == 0 ? max0 : max0 * sqrt(1 + (min0/max0)**2)),
+ // where max0 = max(|x0|, |y0|), min0 = min(|x0|, |y0|), and similarly for l1.
+
+ RealPacket a_abs = pabs(a.v); // [|x0|, |y0|, |x1|, |y1|]
+ RealPacket a_abs_flip = pcplxflip(Packet(a_abs)).v; // [|y0|, |x0|, |y1|, |x1|]
+ RealPacket a_max = pmax(a_abs, a_abs_flip);
+ RealPacket a_min = pmin(a_abs, a_abs_flip);
+ RealPacket a_min_zero_mask = pcmp_eq(a_min, pzero(a_min));
+ RealPacket a_max_zero_mask = pcmp_eq(a_max, pzero(a_max));
+ RealPacket r = pdiv(a_min, a_max);
+ const RealPacket cst_one = pset1<RealPacket>(RealScalar(1));
+ RealPacket l = pmul(a_max, psqrt(padd(cst_one, pmul(r, r)))); // [l0, l0, l1, l1]
+ // Set l to a_max if a_min is zero.
+ l = pselect(a_min_zero_mask, a_max, l);
+
+ // Step 2. Compute [rho0, *, rho1, *], where
+ // rho0 = sqrt(0.5 * (l0 + |x0|)), rho1 = sqrt(0.5 * (l1 + |x1|))
+ // We don't care about the imaginary parts computed here. They will be overwritten later.
+ const RealPacket cst_half = pset1<RealPacket>(RealScalar(0.5));
+ Packet rho;
+ rho.v = psqrt(pmul(cst_half, padd(a_abs, l)));
+
+ // Step 3. Compute [rho0, eta0, rho1, eta1], where
+ // eta0 = y0 / (2 * rho0), and eta1 = y1 / (2 * rho1).
+ // Set eta = 0 if the input is 0 + i0.
+ RealPacket eta = pandnot(pmul(cst_half, pdiv(a.v, pcplxflip(rho).v)), a_max_zero_mask);
+ RealPacket real_mask = peven_mask(a.v);
+ Packet positive_real_result;
+ // Compute result for inputs with positive real part.
+ positive_real_result.v = pselect(real_mask, rho.v, eta);
+
+ // Step 4. Compute solution for inputs with negative real part:
+ // [|eta0|, sign(y0)*rho0, |eta1|, sign(y1)*rho1]
+ const RealScalar neg_zero = RealScalar(numext::bit_cast<float>(0x80000000u));
+ const RealPacket cst_imag_sign_mask = pset1<Packet>(Scalar(RealScalar(0.0), neg_zero)).v;
+ RealPacket imag_signs = pand(a.v, cst_imag_sign_mask);
+ Packet negative_real_result;
+ // Notice that rho is positive, so taking its absolute value is a no-op.
+ negative_real_result.v = por(pabs(pcplxflip(positive_real_result).v), imag_signs);
+
+ // Step 5. Select solution branch based on the sign of the real parts.
+ Packet negative_real_mask;
+ negative_real_mask.v = pcmp_lt(pand(real_mask, a.v), pzero(a.v));
+ negative_real_mask.v = por(negative_real_mask.v, pcplxflip(negative_real_mask).v);
+ Packet result = pselect(negative_real_mask, negative_real_result, positive_real_result);
+
+ // Step 6. Handle special cases for infinities:
+ // * If z is (x,+∞), the result is (+∞,+∞) even if x is NaN
+ // * If z is (x,-∞), the result is (+∞,-∞) even if x is NaN
+ // * If z is (-∞,y), the result is (0*|y|,+∞) for finite or NaN y
+ // * If z is (+∞,y), the result is (+∞,0*|y|) for finite or NaN y
+ const RealPacket cst_pos_inf = pset1<RealPacket>(NumTraits<RealScalar>::infinity());
+ Packet is_inf;
+ is_inf.v = pcmp_eq(a_abs, cst_pos_inf);
+ Packet is_real_inf;
+ is_real_inf.v = pand(is_inf.v, real_mask);
+ is_real_inf = por(is_real_inf, pcplxflip(is_real_inf));
+ // prepare packet of (+∞,0*|y|) or (0*|y|,+∞), depending on the sign of the infinite real part.
+ Packet real_inf_result;
+ real_inf_result.v = pmul(a_abs, pset1<Packet>(Scalar(RealScalar(1.0), RealScalar(0.0))).v);
+ real_inf_result.v = pselect(negative_real_mask.v, pcplxflip(real_inf_result).v, real_inf_result.v);
+ // prepare packet of (+∞,+∞) or (+∞,-∞), depending on the sign of the infinite imaginary part.
+ Packet is_imag_inf;
+ is_imag_inf.v = pandnot(is_inf.v, real_mask);
+ is_imag_inf = por(is_imag_inf, pcplxflip(is_imag_inf));
+ Packet imag_inf_result;
+ imag_inf_result.v = por(pand(cst_pos_inf, real_mask), pandnot(a.v, real_mask));
+
+ return pselect(is_imag_inf, imag_inf_result,
+ pselect(is_real_inf, real_inf_result,result));
+}
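+// Worked example, for illustration, with the input a0 = -4 + 0i: l0 = 4 and
+// rho0 = sqrt(0.5 * (4 + 4)) = 2; the negative-real branch then selects
+// u0 = |eta0| = 0 and v0 = sign(+0) * rho0 = 2, returning the principal
+// root 0 + 2i.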
+
+// TODO(rmlarsen): The following set of utilities for double word arithmetic
+// should perhaps be refactored as a separate file, since it would be generally
+// useful for special function implementation etc. Writing the algorithms in
+// terms of a double word type would also make the code more readable.
+
+// This function splits x into the nearest integer n and fractional part r,
+// such that x = n + r holds exactly.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void absolute_split(const Packet& x, Packet& n, Packet& r) {
+ n = pround(x);
+ r = psub(x, n);
+}
+
+// This function computes the sum {s_hi, s_lo} such that x + y = s_hi + s_lo
+// holds exactly, and s_hi = fl(x+y), if |x| >= |y|.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void fast_twosum(const Packet& x, const Packet& y, Packet& s_hi, Packet& s_lo) {
+ s_hi = padd(x, y);
+ const Packet t = psub(s_hi, x);
+ s_lo = psub(y, t);
+}
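+// Worked example, for illustration, in double precision: x = 1.0, y = 2^-53.
+// Then s_hi = fl(1 + 2^-53) = 1.0, t = s_hi - x = 0 and s_lo = y - t = 2^-53,
+// so s_hi + s_lo carries the exact sum that fl() alone would have dropped.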
+
+#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+// This function implements the extended precision product of
+// a pair of floating point numbers. Given {x, y}, it computes the pair
+// {p_hi, p_lo} such that x * y = p_hi + p_lo holds exactly and
+// p_hi = fl(x * y).
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void twoprod(const Packet& x, const Packet& y,
+ Packet& p_hi, Packet& p_lo) {
+ p_hi = pmul(x, y);
+ p_lo = pmadd(x, y, pnegate(p_hi));
+}
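+// Worked example, for illustration, in double precision with x = y = 1 + 2^-27:
+// the exact product is 1 + 2^-26 + 2^-54, so p_hi = fl(x*y) = 1 + 2^-26 and
+// the FMA recovers p_lo = 2^-54, giving x * y = p_hi + p_lo exactly.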
+
+#else
+
+// This function implements the Veltkamp splitting. Given a floating point
+// number x it returns the pair {x_hi, x_lo} such that x_hi + x_lo = x holds
+// exactly and that half of the significand of x fits in x_hi.
+// This is Algorithm 3 from Jean-Michel Muller, "Elementary Functions",
+// 3rd edition, Birkh\"auser, 2016.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void veltkamp_splitting(const Packet& x, Packet& x_hi, Packet& x_lo) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ EIGEN_CONSTEXPR int shift = (NumTraits<Scalar>::digits() + 1) / 2;
+ const Scalar shift_scale = Scalar(uint64_t(1) << shift); // Scalar constructor not necessarily constexpr.
+ const Packet gamma = pmul(pset1<Packet>(shift_scale + Scalar(1)), x);
+ Packet rho = psub(x, gamma);
+ x_hi = padd(rho, gamma);
+ x_lo = psub(x, x_hi);
+}
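+// For double, shift = 27 and shift_scale = 2^27: x_hi is x rounded to 26
+// significand bits and x_lo is the exact remainder. For illustration,
+// x = 1 + 2^-20 + 2^-40 splits into x_hi = 1 + 2^-20 and x_lo = 2^-40,
+// with x_hi + x_lo == x exactly.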
+
+// This function implements Dekker's algorithm for products x * y.
+// Given floating point numbers {x, y} computes the pair
+// {p_hi, p_lo} such that x * y = p_hi + p_lo holds exactly and
+// p_hi = fl(x * y).
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void twoprod(const Packet& x, const Packet& y,
+ Packet& p_hi, Packet& p_lo) {
+ Packet x_hi, x_lo, y_hi, y_lo;
+ veltkamp_splitting(x, x_hi, x_lo);
+ veltkamp_splitting(y, y_hi, y_lo);
+
+ p_hi = pmul(x, y);
+ p_lo = pmadd(x_hi, y_hi, pnegate(p_hi));
+ p_lo = pmadd(x_hi, y_lo, p_lo);
+ p_lo = pmadd(x_lo, y_hi, p_lo);
+ p_lo = pmadd(x_lo, y_lo, p_lo);
+}
+
+#endif // EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+
+
+// This function implements Dekker's algorithm for the addition
+// of two double word numbers represented by {x_hi, x_lo} and {y_hi, y_lo}.
+// It returns the result as a pair {s_hi, s_lo} such that
+// x_hi + x_lo + y_hi + y_lo = s_hi + s_lo holds exactly.
+// This is Algorithm 5 from Jean-Michel Muller, "Elementary Functions",
+// 3rd edition, Birkh\"auser, 2016.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void twosum(const Packet& x_hi, const Packet& x_lo,
+            const Packet& y_hi, const Packet& y_lo,
+            Packet& s_hi, Packet& s_lo) {
+  const Packet x_greater_mask = pcmp_lt(pabs(y_hi), pabs(x_hi));
+  Packet r_hi_1, r_lo_1;
+  fast_twosum(x_hi, y_hi, r_hi_1, r_lo_1);
+  Packet r_hi_2, r_lo_2;
+  fast_twosum(y_hi, x_hi, r_hi_2, r_lo_2);
+ const Packet r_hi = pselect(x_greater_mask, r_hi_1, r_hi_2);
+
+ const Packet s1 = padd(padd(y_lo, r_lo_1), x_lo);
+ const Packet s2 = padd(padd(x_lo, r_lo_2), y_lo);
+ const Packet s = pselect(x_greater_mask, s1, s2);
+
+ fast_twosum(r_hi, s, s_hi, s_lo);
+}
+
+// This is a version of twosum for double word numbers,
+// which assumes that |x_hi| >= |y_hi|.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void fast_twosum(const Packet& x_hi, const Packet& x_lo,
+                 const Packet& y_hi, const Packet& y_lo,
+                 Packet& s_hi, Packet& s_lo) {
+ Packet r_hi, r_lo;
+ fast_twosum(x_hi, y_hi, r_hi, r_lo);
+ const Packet s = padd(padd(y_lo, r_lo), x_lo);
+ fast_twosum(r_hi, s, s_hi, s_lo);
+}
+
+// This is a version of twosum for adding a floating point number x to
+// double word number {y_hi, y_lo} number, with the assumption
+// that |x| >= |y_hi|.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void fast_twosum(const Packet& x,
+ const Packet& y_hi, const Packet& y_lo,
+ Packet& s_hi, Packet& s_lo) {
+ Packet r_hi, r_lo;
+ fast_twosum(x, y_hi, r_hi, r_lo);
+ const Packet s = padd(y_lo, r_lo);
+ fast_twosum(r_hi, s, s_hi, s_lo);
+}
+
+// This function implements the multiplication of a double word
+// number represented by {x_hi, x_lo} by a floating point number y.
+// It returns the result as a pair {p_hi, p_lo} such that
+// (x_hi + x_lo) * y = p_hi + p_lo holds with a relative error
+// of less than 2*2^{-2p}, where p is the number of significand bits
+// in the floating point type.
+// This is Algorithm 7 from Jean-Michel Muller, "Elementary Functions",
+// 3rd edition, Birkh\"auser, 2016.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void twoprod(const Packet& x_hi, const Packet& x_lo, const Packet& y,
+ Packet& p_hi, Packet& p_lo) {
+ Packet c_hi, c_lo1;
+ twoprod(x_hi, y, c_hi, c_lo1);
+ const Packet c_lo2 = pmul(x_lo, y);
+ Packet t_hi, t_lo1;
+ fast_twosum(c_hi, c_lo2, t_hi, t_lo1);
+ const Packet t_lo2 = padd(t_lo1, c_lo1);
+ fast_twosum(t_hi, t_lo2, p_hi, p_lo);
+}
+
+// This function implements the multiplication of two double word
+// numbers represented by {x_hi, x_lo} and {y_hi, y_lo}.
+// It returns the result as a pair {p_hi, p_lo} such that
+// (x_hi + x_lo) * (y_hi + y_lo) = p_hi + p_lo holds with a relative error
+// of less than 2*2^{-2p}, where p is the number of significand bits
+// in the floating point type.
+template<typename Packet>
+EIGEN_STRONG_INLINE
+void twoprod(const Packet& x_hi, const Packet& x_lo,
+ const Packet& y_hi, const Packet& y_lo,
+ Packet& p_hi, Packet& p_lo) {
+ Packet p_hi_hi, p_hi_lo;
+ twoprod(x_hi, x_lo, y_hi, p_hi_hi, p_hi_lo);
+ Packet p_lo_hi, p_lo_lo;
+ twoprod(x_hi, x_lo, y_lo, p_lo_hi, p_lo_lo);
+ fast_twosum(p_hi_hi, p_hi_lo, p_lo_hi, p_lo_lo, p_hi, p_lo);
+}
+
+// This function computes the reciprocal of a floating point number
+// with extra precision and returns the result as a double word.
+template <typename Packet>
+void doubleword_reciprocal(const Packet& x, Packet& recip_hi, Packet& recip_lo) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+  // 1. Approximate the reciprocal in working precision as rsqrt(x)^2.
+ Packet approx_recip = prsqrt(x);
+ approx_recip = pmul(approx_recip, approx_recip);
+
+ // 2. Run one step of Newton-Raphson iteration in double word arithmetic
+ // to get the bottom half. The NR iteration for reciprocal of 'a' is
+ // x_{i+1} = x_i * (2 - a * x_i)
+
+ // -a*x_i
+ Packet t1_hi, t1_lo;
+ twoprod(pnegate(x), approx_recip, t1_hi, t1_lo);
+ // 2 - a*x_i
+ Packet t2_hi, t2_lo;
+ fast_twosum(pset1<Packet>(Scalar(2)), t1_hi, t2_hi, t2_lo);
+ Packet t3_hi, t3_lo;
+ fast_twosum(t2_hi, padd(t2_lo, t1_lo), t3_hi, t3_lo);
+ // x_i * (2 - a * x_i)
+ twoprod(t3_hi, t3_lo, approx_recip, recip_hi, recip_lo);
+}
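+// Illustrative scalar sketch (not part of Eigen) of the same refinement on
+// plain doubles, assuming an FMA:
+//   double r = 1.0 / x;               // working-precision seed
+//   double e = std::fma(-x, r, 1.0);  // residual 1 - x*r, computed exactly
+//   double recip_hi = r;
+//   double recip_lo = r * e;          // first-order Newton correction
+// recip_hi + recip_lo then approximates 1/x to roughly twice working precision.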
+
+
+// This function computes log2(x) and returns the result as a double word.
+template <typename Scalar>
+struct accurate_log2 {
+ template <typename Packet>
+ EIGEN_STRONG_INLINE
+ void operator()(const Packet& x, Packet& log2_x_hi, Packet& log2_x_lo) {
+ log2_x_hi = plog2(x);
+ log2_x_lo = pzero(x);
+ }
+};
+
+// This specialization uses a more accurate algorithm to compute log2(x) for
+// floats in [1/sqrt(2);sqrt(2)] with a relative accuracy of ~6.42e-10.
+// This additional accuracy is needed to counter the error-magnification
+// inherent in multiplying by a potentially large exponent in pow(x,y).
+// The minimax polynomial used was calculated using the Sollya tool.
+// See sollya.org.
+template <>
+struct accurate_log2<float> {
+ template <typename Packet>
+ EIGEN_STRONG_INLINE
+ void operator()(const Packet& z, Packet& log2_x_hi, Packet& log2_x_lo) {
+    // The function log2(1+x)/x is approximated in the interval
+ // [1/sqrt(2)-1;sqrt(2)-1] by a degree 10 polynomial of the form
+ // Q(x) = (C0 + x * (C1 + x * (C2 + x * (C3 + x * P(x))))),
+ // where the degree 6 polynomial P(x) is evaluated in single precision,
+ // while the remaining 4 terms of Q(x), as well as the final multiplication by x
+    // to reconstruct log2(1+x), are evaluated in extra precision using
+ // double word arithmetic. C0 through C3 are extra precise constants
+ // stored as double words.
+ //
+ // The polynomial coefficients were calculated using Sollya commands:
+ // > n = 10;
+ // > f = log2(1+x)/x;
+ // > interval = [sqrt(0.5)-1;sqrt(2)-1];
+ // > p = fpminimax(f,n,[|double,double,double,double,single...|],interval,relative,floating);
+
+ const Packet p6 = pset1<Packet>( 9.703654795885e-2f);
+ const Packet p5 = pset1<Packet>(-0.1690667718648f);
+ const Packet p4 = pset1<Packet>( 0.1720575392246f);
+ const Packet p3 = pset1<Packet>(-0.1789081543684f);
+ const Packet p2 = pset1<Packet>( 0.2050433009862f);
+ const Packet p1 = pset1<Packet>(-0.2404672354459f);
+ const Packet p0 = pset1<Packet>( 0.2885761857032f);
+
+ const Packet C3_hi = pset1<Packet>(-0.360674142838f);
+ const Packet C3_lo = pset1<Packet>(-6.13283912543e-09f);
+ const Packet C2_hi = pset1<Packet>(0.480897903442f);
+ const Packet C2_lo = pset1<Packet>(-1.44861207474e-08f);
+ const Packet C1_hi = pset1<Packet>(-0.721347510815f);
+ const Packet C1_lo = pset1<Packet>(-4.84483164698e-09f);
+ const Packet C0_hi = pset1<Packet>(1.44269502163f);
+ const Packet C0_lo = pset1<Packet>(2.01711713999e-08f);
+ const Packet one = pset1<Packet>(1.0f);
+
+ const Packet x = psub(z, one);
+ // Evaluate P(x) in working precision.
+ // We evaluate it in multiple parts to improve instruction level
+ // parallelism.
+ Packet x2 = pmul(x,x);
+ Packet p_even = pmadd(p6, x2, p4);
+ p_even = pmadd(p_even, x2, p2);
+ p_even = pmadd(p_even, x2, p0);
+ Packet p_odd = pmadd(p5, x2, p3);
+ p_odd = pmadd(p_odd, x2, p1);
+ Packet p = pmadd(p_odd, x, p_even);
+
+    // Now evaluate the low-order terms of Q(x) in double word precision.
+    // In the following, due to the alternating signs and the fact that
+    // |x| < sqrt(2)-1, we can assume that |C*_hi| >= |q_i|, and use
+    // fast_twosum instead of the slower twosum.
+ Packet q_hi, q_lo;
+ Packet t_hi, t_lo;
+ // C3 + x * p(x)
+ twoprod(p, x, t_hi, t_lo);
+ fast_twosum(C3_hi, C3_lo, t_hi, t_lo, q_hi, q_lo);
+    // C2 + x * q(x)
+ twoprod(q_hi, q_lo, x, t_hi, t_lo);
+ fast_twosum(C2_hi, C2_lo, t_hi, t_lo, q_hi, q_lo);
+    // C1 + x * q(x)
+ twoprod(q_hi, q_lo, x, t_hi, t_lo);
+ fast_twosum(C1_hi, C1_lo, t_hi, t_lo, q_hi, q_lo);
+    // C0 + x * q(x)
+ twoprod(q_hi, q_lo, x, t_hi, t_lo);
+ fast_twosum(C0_hi, C0_lo, t_hi, t_lo, q_hi, q_lo);
+
+    // log2(z) ~= x * Q(x)
+ twoprod(q_hi, q_lo, x, log2_x_hi, log2_x_lo);
+ }
+};
+
+// This specialization uses a more accurate algorithm to compute log2(x) for
+// floats in [1/sqrt(2);sqrt(2)] with a relative accuracy of ~1.27e-18.
+// This additional accuracy is needed to counter the error-magnification
+// inherent in multiplying by a potentially large exponent in pow(x,y).
+// The minimax polynomial used was calculated using the Sollya tool.
+// See sollya.org.
+
+template <>
+struct accurate_log2<double> {
+ template <typename Packet>
+ EIGEN_STRONG_INLINE
+ void operator()(const Packet& x, Packet& log2_x_hi, Packet& log2_x_lo) {
+ // We use a transformation of variables:
+ // r = c * (x-1) / (x+1),
+ // such that
+ // log2(x) = log2((1 + r/c) / (1 - r/c)) = f(r).
+ // The function f(r) can be approximated well using an odd polynomial
+ // of the form
+    //   P(r) = ((Q(r^2) * r^2 + C) * r^2 + 1) * r.
+    // For the implementation of log2<double> here, Q is of degree 6 with
+    // coefficients represented in working precision (double), while C is a
+ // constant represented in extra precision as a double word to achieve
+ // full accuracy.
+ //
+ // The polynomial coefficients were computed by the Sollya script:
+ //
+ // c = 2 / log(2);
+ // trans = c * (x-1)/(x+1);
+ // itrans = (1+x/c)/(1-x/c);
+ // interval=[trans(sqrt(0.5)); trans(sqrt(2))];
+ // print(interval);
+ // f = log2(itrans(x));
+ // p=fpminimax(f,[|1,3,5,7,9,11,13,15,17|],[|1,DD,double...|],interval,relative,floating);
+ const Packet q12 = pset1<Packet>(2.87074255468000586e-9);
+ const Packet q10 = pset1<Packet>(2.38957980901884082e-8);
+ const Packet q8 = pset1<Packet>(2.31032094540014656e-7);
+ const Packet q6 = pset1<Packet>(2.27279857398537278e-6);
+ const Packet q4 = pset1<Packet>(2.31271023278625638e-5);
+ const Packet q2 = pset1<Packet>(2.47556738444535513e-4);
+ const Packet q0 = pset1<Packet>(2.88543873228900172e-3);
+ const Packet C_hi = pset1<Packet>(0.0400377511598501157);
+ const Packet C_lo = pset1<Packet>(-4.77726582251425391e-19);
+ const Packet one = pset1<Packet>(1.0);
+
+ const Packet cst_2_log2e_hi = pset1<Packet>(2.88539008177792677);
+ const Packet cst_2_log2e_lo = pset1<Packet>(4.07660016854549667e-17);
+ // c * (x - 1)
+ Packet num_hi, num_lo;
+ twoprod(cst_2_log2e_hi, cst_2_log2e_lo, psub(x, one), num_hi, num_lo);
+ // TODO(rmlarsen): Investigate if using the division algorithm by
+ // Muller et al. is faster/more accurate.
+ // 1 / (x + 1)
+ Packet denom_hi, denom_lo;
+ doubleword_reciprocal(padd(x, one), denom_hi, denom_lo);
+ // r = c * (x-1) / (x+1),
+ Packet r_hi, r_lo;
+ twoprod(num_hi, num_lo, denom_hi, denom_lo, r_hi, r_lo);
+ // r2 = r * r
+ Packet r2_hi, r2_lo;
+ twoprod(r_hi, r_lo, r_hi, r_lo, r2_hi, r2_lo);
+ // r4 = r2 * r2
+ Packet r4_hi, r4_lo;
+ twoprod(r2_hi, r2_lo, r2_hi, r2_lo, r4_hi, r4_lo);
+
+ // Evaluate Q(r^2) in working precision. We evaluate it in two parts
+ // (even and odd in r^2) to improve instruction level parallelism.
+ Packet q_even = pmadd(q12, r4_hi, q8);
+ Packet q_odd = pmadd(q10, r4_hi, q6);
+ q_even = pmadd(q_even, r4_hi, q4);
+ q_odd = pmadd(q_odd, r4_hi, q2);
+ q_even = pmadd(q_even, r4_hi, q0);
+ Packet q = pmadd(q_odd, r2_hi, q_even);
+
+    // Now evaluate the low-order terms of P(r) in double word precision.
+ // In the following, due to the increasing magnitude of the coefficients
+ // and r being constrained to [-0.5, 0.5] we can use fast_twosum instead
+ // of the slower twosum.
+ // Q(r^2) * r^2
+ Packet p_hi, p_lo;
+ twoprod(r2_hi, r2_lo, q, p_hi, p_lo);
+ // Q(r^2) * r^2 + C
+ Packet p1_hi, p1_lo;
+ fast_twosum(C_hi, C_lo, p_hi, p_lo, p1_hi, p1_lo);
+ // (Q(r^2) * r^2 + C) * r^2
+ Packet p2_hi, p2_lo;
+ twoprod(r2_hi, r2_lo, p1_hi, p1_lo, p2_hi, p2_lo);
+ // ((Q(r^2) * r^2 + C) * r^2 + 1)
+ Packet p3_hi, p3_lo;
+ fast_twosum(one, p2_hi, p2_lo, p3_hi, p3_lo);
+
+    // log2(x) ~= ((Q(r^2) * r^2 + C) * r^2 + 1) * r
+ twoprod(p3_hi, p3_lo, r_hi, r_lo, log2_x_hi, log2_x_lo);
+ }
+};
+
+// This function computes exp2(x) (i.e. 2**x).
+template <typename Scalar>
+struct fast_accurate_exp2 {
+ template <typename Packet>
+ EIGEN_STRONG_INLINE
+ Packet operator()(const Packet& x) {
+    // TODO(rmlarsen): Add a pexp2 packet op.
+ return pexp(pmul(pset1<Packet>(Scalar(EIGEN_LN2)), x));
+ }
+};
+
+// This specialization uses a faster algorithm to compute exp2(x) for floats
+// in [-0.5;0.5] with a relative accuracy of 1 ulp.
+// The minimax polynomial used was calculated using the Sollya tool.
+// See sollya.org.
+template <>
+struct fast_accurate_exp2<float> {
+ template <typename Packet>
+ EIGEN_STRONG_INLINE
+ Packet operator()(const Packet& x) {
+ // This function approximates exp2(x) by a degree 6 polynomial of the form
+ // Q(x) = 1 + x * (C + x * P(x)), where the degree 4 polynomial P(x) is evaluated in
+ // single precision, and the remaining steps are evaluated with extra precision using
+ // double word arithmetic. C is an extra precise constant stored as a double word.
+ //
+ // The polynomial coefficients were calculated using Sollya commands:
+ // > n = 6;
+ // > f = 2^x;
+ // > interval = [-0.5;0.5];
+ // > p = fpminimax(f,n,[|1,double,single...|],interval,relative,floating);
+
+ const Packet p4 = pset1<Packet>(1.539513905e-4f);
+ const Packet p3 = pset1<Packet>(1.340007293e-3f);
+ const Packet p2 = pset1<Packet>(9.618283249e-3f);
+ const Packet p1 = pset1<Packet>(5.550328270e-2f);
+ const Packet p0 = pset1<Packet>(0.2402264923f);
+
+ const Packet C_hi = pset1<Packet>(0.6931471825f);
+ const Packet C_lo = pset1<Packet>(2.36836577e-08f);
+ const Packet one = pset1<Packet>(1.0f);
+
+ // Evaluate P(x) in working precision.
+ // We evaluate even and odd parts of the polynomial separately
+ // to gain some instruction level parallelism.
+ Packet x2 = pmul(x,x);
+ Packet p_even = pmadd(p4, x2, p2);
+ Packet p_odd = pmadd(p3, x2, p1);
+ p_even = pmadd(p_even, x2, p0);
+ Packet p = pmadd(p_odd, x, p_even);
+
+ // Evaluate the remaining terms of Q(x) with extra precision using
+ // double word arithmetic.
+ Packet p_hi, p_lo;
+ // x * p(x)
+ twoprod(p, x, p_hi, p_lo);
+ // C + x * p(x)
+ Packet q1_hi, q1_lo;
+ twosum(p_hi, p_lo, C_hi, C_lo, q1_hi, q1_lo);
+ // x * (C + x * p(x))
+ Packet q2_hi, q2_lo;
+ twoprod(q1_hi, q1_lo, x, q2_hi, q2_lo);
+ // 1 + x * (C + x * p(x))
+ Packet q3_hi, q3_lo;
+ // Since |q2_hi| <= sqrt(2)-1 < 1, we can use fast_twosum
+ // for adding it to unity here.
+ fast_twosum(one, q2_hi, q3_hi, q3_lo);
+ return padd(q3_hi, padd(q2_lo, q3_lo));
+ }
+};
+
+// This specialization uses a faster algorithm to compute exp2(x) for doubles
+// in [-0.5;0.5] with a relative accuracy of 1 ulp.
+// The minimax polynomial used was calculated using the Sollya tool.
+// See sollya.org.
+template <>
+struct fast_accurate_exp2<double> {
+ template <typename Packet>
+ EIGEN_STRONG_INLINE
+ Packet operator()(const Packet& x) {
+    // This function approximates exp2(x) by a degree 11 polynomial of the form
+    // Q(x) = 1 + x * (C + x * P(x)), where the degree 9 polynomial P(x) is evaluated in
+    // working (double) precision, and the remaining steps are evaluated with extra
+    // precision using double word arithmetic. C is an extra precise constant stored as a
+    // double word.
+ //
+ // The polynomial coefficients were calculated using Sollya commands:
+ // > n = 11;
+ // > f = 2^x;
+ // > interval = [-0.5;0.5];
+ // > p = fpminimax(f,n,[|1,DD,double...|],interval,relative,floating);
+
+ const Packet p9 = pset1<Packet>(4.431642109085495276e-10);
+ const Packet p8 = pset1<Packet>(7.073829923303358410e-9);
+ const Packet p7 = pset1<Packet>(1.017822306737031311e-7);
+ const Packet p6 = pset1<Packet>(1.321543498017646657e-6);
+ const Packet p5 = pset1<Packet>(1.525273342728892877e-5);
+ const Packet p4 = pset1<Packet>(1.540353045780084423e-4);
+ const Packet p3 = pset1<Packet>(1.333355814685869807e-3);
+ const Packet p2 = pset1<Packet>(9.618129107593478832e-3);
+ const Packet p1 = pset1<Packet>(5.550410866481961247e-2);
+ const Packet p0 = pset1<Packet>(0.240226506959101332);
+ const Packet C_hi = pset1<Packet>(0.693147180559945286);
+ const Packet C_lo = pset1<Packet>(4.81927865669806721e-17);
+ const Packet one = pset1<Packet>(1.0);
+
+ // Evaluate P(x) in working precision.
+ // We evaluate even and odd parts of the polynomial separately
+ // to gain some instruction level parallelism.
+ Packet x2 = pmul(x,x);
+ Packet p_even = pmadd(p8, x2, p6);
+ Packet p_odd = pmadd(p9, x2, p7);
+ p_even = pmadd(p_even, x2, p4);
+ p_odd = pmadd(p_odd, x2, p5);
+ p_even = pmadd(p_even, x2, p2);
+ p_odd = pmadd(p_odd, x2, p3);
+ p_even = pmadd(p_even, x2, p0);
+ p_odd = pmadd(p_odd, x2, p1);
+ Packet p = pmadd(p_odd, x, p_even);
+
+ // Evaluate the remaining terms of Q(x) with extra precision using
+ // double word arithmetic.
+ Packet p_hi, p_lo;
+ // x * p(x)
+ twoprod(p, x, p_hi, p_lo);
+ // C + x * p(x)
+ Packet q1_hi, q1_lo;
+ twosum(p_hi, p_lo, C_hi, C_lo, q1_hi, q1_lo);
+ // x * (C + x * p(x))
+ Packet q2_hi, q2_lo;
+ twoprod(q1_hi, q1_lo, x, q2_hi, q2_lo);
+ // 1 + x * (C + x * p(x))
+ Packet q3_hi, q3_lo;
+ // Since |q2_hi| <= sqrt(2)-1 < 1, we can use fast_twosum
+ // for adding it to unity here.
+ fast_twosum(one, q2_hi, q3_hi, q3_lo);
+ return padd(q3_hi, padd(q2_lo, q3_lo));
+ }
+};
+
+// This function implements the non-trivial case of pow(x,y) where x is
+// positive and y is (possibly) non-integer.
+// Formally, pow(x,y) = exp2(y * log2(x)), where exp2(x) is shorthand for 2^x.
+// TODO(rmlarsen): We should probably add this as a packet op 'ppow', to make it
+// easier to specialize or turn off for specific types and/or backends.
+template <typename Packet>
+EIGEN_STRONG_INLINE Packet generic_pow_impl(const Packet& x, const Packet& y) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ // Split x into exponent e_x and mantissa m_x.
+ Packet e_x;
+ Packet m_x = pfrexp(x, e_x);
+
+ // Adjust m_x to lie in [1/sqrt(2):sqrt(2)] to minimize absolute error in log2(m_x).
+ EIGEN_CONSTEXPR Scalar sqrt_half = Scalar(0.70710678118654752440);
+ const Packet m_x_scale_mask = pcmp_lt(m_x, pset1<Packet>(sqrt_half));
+ m_x = pselect(m_x_scale_mask, pmul(pset1<Packet>(Scalar(2)), m_x), m_x);
+ e_x = pselect(m_x_scale_mask, psub(e_x, pset1<Packet>(Scalar(1))), e_x);
+
+ // Compute log2(m_x) with 6 extra bits of accuracy.
+ Packet rx_hi, rx_lo;
+ accurate_log2<Scalar>()(m_x, rx_hi, rx_lo);
+
+ // Compute the two terms {y * e_x, y * r_x} in f = y * log2(x) with doubled
+ // precision using double word arithmetic.
+ Packet f1_hi, f1_lo, f2_hi, f2_lo;
+ twoprod(e_x, y, f1_hi, f1_lo);
+ twoprod(rx_hi, rx_lo, y, f2_hi, f2_lo);
+ // Sum the two terms in f using double word arithmetic. We know
+ // that |e_x| > |log2(m_x)|, except for the case where e_x==0.
+ // This means that we can use fast_twosum(f1,f2).
+ // In the case e_x == 0, e_x * y = f1 = 0, so we don't lose any
+ // accuracy by violating the assumption of fast_twosum, because
+ // it's a no-op.
+ Packet f_hi, f_lo;
+ fast_twosum(f1_hi, f1_lo, f2_hi, f2_lo, f_hi, f_lo);
+
+ // Split f into integer and fractional parts.
+ Packet n_z, r_z;
+ absolute_split(f_hi, n_z, r_z);
+ r_z = padd(r_z, f_lo);
+ Packet n_r;
+ absolute_split(r_z, n_r, r_z);
+ n_z = padd(n_z, n_r);
+
+ // We now have an accurate split of f = n_z + r_z and can compute
+  // x^y = 2**(n_z + r_z) = exp2(r_z) * 2**(n_z).
+ // Since r_z is in [-0.5;0.5], we compute the first factor to high accuracy
+ // using a specialized algorithm. Multiplication by the second factor can
+ // be done exactly using pldexp(), since it is an integer power of 2.
+ const Packet e_r = fast_accurate_exp2<Scalar>()(r_z);
+ return pldexp(e_r, n_z);
+}
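+// Illustrative scalar sketch (not part of Eigen) of the same decomposition in
+// plain double arithmetic, ignoring the double word bookkeeping (assumes <cmath>):
+//   int e_x; double m_x = std::frexp(x, &e_x);   // x = m_x * 2^e_x
+//   double f = y * (std::log2(m_x) + e_x);       // f = y * log2(x)
+//   double n_z = std::round(f), r_z = f - n_z;   // f = n_z + r_z, |r_z| <= 0.5
+//   double result = std::ldexp(std::exp2(r_z), static_cast<int>(n_z));
+// The packet version above does exactly this, but carries log2(m_x) and the
+// products in double word form so the low-order bits of f are not lost.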
+
+// Generic implementation of pow(x,y).
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet generic_pow(const Packet& x, const Packet& y) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+
+ const Packet cst_pos_inf = pset1<Packet>(NumTraits<Scalar>::infinity());
+ const Packet cst_zero = pset1<Packet>(Scalar(0));
+ const Packet cst_one = pset1<Packet>(Scalar(1));
+ const Packet cst_nan = pset1<Packet>(NumTraits<Scalar>::quiet_NaN());
+
+ const Packet abs_x = pabs(x);
+ // Predicates for sign and magnitude of x.
+ const Packet x_is_zero = pcmp_eq(x, cst_zero);
+ const Packet x_is_neg = pcmp_lt(x, cst_zero);
+ const Packet abs_x_is_inf = pcmp_eq(abs_x, cst_pos_inf);
+ const Packet abs_x_is_one = pcmp_eq(abs_x, cst_one);
+ const Packet abs_x_is_gt_one = pcmp_lt(cst_one, abs_x);
+ const Packet abs_x_is_lt_one = pcmp_lt(abs_x, cst_one);
+ const Packet x_is_one = pandnot(abs_x_is_one, x_is_neg);
+ const Packet x_is_neg_one = pand(abs_x_is_one, x_is_neg);
+ const Packet x_is_nan = pandnot(ptrue(x), pcmp_eq(x, x));
+
+ // Predicates for sign and magnitude of y.
+ const Packet y_is_one = pcmp_eq(y, cst_one);
+ const Packet y_is_zero = pcmp_eq(y, cst_zero);
+ const Packet y_is_neg = pcmp_lt(y, cst_zero);
+ const Packet y_is_pos = pandnot(ptrue(y), por(y_is_zero, y_is_neg));
+ const Packet y_is_nan = pandnot(ptrue(y), pcmp_eq(y, y));
+ const Packet abs_y_is_inf = pcmp_eq(pabs(y), cst_pos_inf);
+ EIGEN_CONSTEXPR Scalar huge_exponent =
+ (NumTraits<Scalar>::max_exponent() * Scalar(EIGEN_LN2)) /
+ NumTraits<Scalar>::epsilon();
+ const Packet abs_y_is_huge = pcmp_le(pset1<Packet>(huge_exponent), pabs(y));
+
+ // Predicates for whether y is integer and/or even.
+ const Packet y_is_int = pcmp_eq(pfloor(y), y);
+ const Packet y_div_2 = pmul(y, pset1<Packet>(Scalar(0.5)));
+ const Packet y_is_even = pcmp_eq(pround(y_div_2), y_div_2);
+
+ // Predicates encoding special cases for the value of pow(x,y)
+ const Packet invalid_negative_x = pandnot(pandnot(pandnot(x_is_neg, abs_x_is_inf),
+ y_is_int),
+ abs_y_is_inf);
+ const Packet pow_is_one = por(por(x_is_one, y_is_zero),
+ pand(x_is_neg_one,
+ por(abs_y_is_inf, pandnot(y_is_even, invalid_negative_x))));
+ const Packet pow_is_nan = por(invalid_negative_x, por(x_is_nan, y_is_nan));
+ const Packet pow_is_zero = por(por(por(pand(x_is_zero, y_is_pos),
+ pand(abs_x_is_inf, y_is_neg)),
+ pand(pand(abs_x_is_lt_one, abs_y_is_huge),
+ y_is_pos)),
+ pand(pand(abs_x_is_gt_one, abs_y_is_huge),
+ y_is_neg));
+ const Packet pow_is_inf = por(por(por(pand(x_is_zero, y_is_neg),
+ pand(abs_x_is_inf, y_is_pos)),
+ pand(pand(abs_x_is_lt_one, abs_y_is_huge),
+ y_is_neg)),
+ pand(pand(abs_x_is_gt_one, abs_y_is_huge),
+ y_is_pos));
+
+ // General computation of pow(x,y) for positive x or negative x and integer y.
+ const Packet negate_pow_abs = pandnot(x_is_neg, y_is_even);
+ const Packet pow_abs = generic_pow_impl(abs_x, y);
+ return pselect(y_is_one, x,
+ pselect(pow_is_one, cst_one,
+ pselect(pow_is_nan, cst_nan,
+ pselect(pow_is_inf, cst_pos_inf,
+ pselect(pow_is_zero, cst_zero,
+ pselect(negate_pow_abs, pnegate(pow_abs), pow_abs))))));
+}
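+// Worked examples of the special-case selection above (illustrative):
+//   pow(-2, 3):   y is an odd integer, so negate_pow_abs is set and the
+//                 result is -generic_pow_impl(2, 3) = -8.
+//   pow(-2, 0.5): x negative, y non-integer -> invalid_negative_x -> NaN.
+//   pow(-1, inf): x_is_neg_one and abs_y_is_inf -> pow_is_one -> 1.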
+
+
+
+/* polevl (modified for Eigen)
+ *
+ * Evaluate polynomial
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * int N;
+ * Scalar x, y, coef[N+1];
+ *
+ * y = polevl<decltype(x), N>( x, coef);
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Evaluates polynomial of degree N:
+ *
+ *     y = C_0 + C_1 x + C_2 x^2 + ... + C_N x^N
+ *
+ * Coefficients are stored in reverse order:
+ *
+ *     coef[0] = C_N, ..., coef[N] = C_0.
+ *
+ * The function p1evl() (from the original Cephes library, not implemented
+ * here) assumes that coef[N] = 1.0 and is omitted from the array. Its
+ * calling arguments are otherwise the same as polevl().
+ *
+ *
+ * The Eigen implementation is templatized. For best speed, store
+ * coef as a const array (constexpr), e.g.
+ *
+ * const double coef[] = {1.0, 2.0, 3.0, ...};
+ *
+ */
+template <typename Packet, int N>
+struct ppolevl {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& x, const typename unpacket_traits<Packet>::type coeff[]) {
+ EIGEN_STATIC_ASSERT((N > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ return pmadd(ppolevl<Packet, N-1>::run(x, coeff), x, pset1<Packet>(coeff[N]));
+ }
+};
+
+template <typename Packet>
+struct ppolevl<Packet, 0> {
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& x, const typename unpacket_traits<Packet>::type coeff[]) {
+ EIGEN_UNUSED_VARIABLE(x);
+ return pset1<Packet>(coeff[0]);
+ }
+};
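+// Usage sketch (illustrative, assuming the SSE type Packet4f): evaluate
+// x^2 + 2x + 3 with the highest-order coefficient first:
+//   const float coef[] = {1.0f, 2.0f, 3.0f};
+//   Packet4f y = ppolevl<Packet4f, 2>::run(x, coef);  // Horner's scheme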
+
+/* chbevl (modified for Eigen)
+ *
+ * Evaluate Chebyshev series
+ *
+ *
+ *
+ * SYNOPSIS:
+ *
+ * int N;
+ * Scalar x, y, coef[N], chebevl();
+ *
+ * y = chbevl( x, coef, N );
+ *
+ *
+ *
+ * DESCRIPTION:
+ *
+ * Evaluates the series
+ *
+ *     y = sum'_{i=0}^{N-1} coef[i] T_i(x/2)
+ *
+ * of Chebyshev polynomials T_i at argument x/2, where the primed sum
+ * indicates that the coefficient of the T_0 term is halved.
+ *
+ * Coefficients are stored in reverse order, i.e. the zero
+ * order term is last in the array. Note N is the number of
+ * coefficients, not the order.
+ *
+ * If coefficients are for the interval a to b, x must
+ * have been transformed to x -> 2(2x - b - a)/(b-a) before
+ * entering the routine. This maps x from (a, b) to (-1, 1),
+ * over which the Chebyshev polynomials are defined.
+ *
+ * If the coefficients are for the inverted interval, in
+ * which (a, b) is mapped to (1/b, 1/a), the transformation
+ * required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity,
+ * this becomes x -> 4a/x - 1.
+ *
+ *
+ *
+ * SPEED:
+ *
+ * Taking advantage of the recurrence properties of the
+ * Chebyshev polynomials, the routine requires one more
+ * addition per loop than evaluating a nested polynomial of
+ * the same degree.
+ *
+ */
+
+template <typename Packet, int N>
+struct pchebevl {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE Packet run(Packet x, const typename unpacket_traits<Packet>::type coef[]) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ Packet b0 = pset1<Packet>(coef[0]);
+ Packet b1 = pset1<Packet>(static_cast<Scalar>(0.f));
+ Packet b2;
+
+ for (int i = 1; i < N; i++) {
+ b2 = b1;
+ b1 = b0;
+ b0 = psub(pmadd(x, b1, pset1<Packet>(coef[i])), b2);
+ }
+
+ return pmul(pset1<Packet>(static_cast<Scalar>(0.5f)), psub(b0, b2));
+ }
+};
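+// Usage sketch (illustrative): the loop above is Clenshaw's recurrence
+// b0 = x*b1 - b2 + coef[i]; with N coefficients ordered from the T_{N-1}
+// term down to the T_0 term, the final 0.5*(b0 - b2) yields the series value.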
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h
new file mode 100644
index 000000000..177a04e93
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h
@@ -0,0 +1,110 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
+#define EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
+
+namespace Eigen {
+namespace internal {
+
+// Forward declarations of the generic math functions
+// implemented in GenericPacketMathFunctions.h
+// This is needed to workaround a circular dependency.
+
+/***************************************************************************
+ * Some generic implementations to be used by implementors
+***************************************************************************/
+
+/** Default implementation of pfrexp.
+ * It is expected to be called by implementers of template<> pfrexp.
+ */
+template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pfrexp_generic(const Packet& a, Packet& exponent);
+
+// Extracts the biased exponent value from Packet p, and casts the results to
+// a floating-point Packet type. Used by pfrexp_generic. Override this if
+// there is no unpacket_traits<Packet>::integer_packet.
+template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pfrexp_generic_get_biased_exponent(const Packet& p);
+
+/** Default implementation of pldexp.
+ * It is expected to be called by implementers of template<> pldexp.
+ */
+template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pldexp_generic(const Packet& a, const Packet& exponent);
+
+/** \internal \returns log(x) for single precision float */
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_float(const Packet _x);
+
+/** \internal \returns log2(x) for single precision float */
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog2_float(const Packet _x);
+
+/** \internal \returns log(x) for double precision real numbers */
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_double(const Packet _x);
+
+/** \internal \returns log2(x) for double precision real numbers */
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog2_double(const Packet _x);
+
+/** \internal \returns log(1 + x) */
+template<typename Packet>
+Packet generic_plog1p(const Packet& x);
+
+/** \internal \returns exp(x)-1 */
+template<typename Packet>
+Packet generic_expm1(const Packet& x);
+
+/** \internal \returns exp(x) for single precision float */
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_float(const Packet _x);
+
+/** \internal \returns exp(x) for double precision real numbers */
+template <typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_double(const Packet _x);
+
+/** \internal \returns sin(x) for single precision float */
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psin_float(const Packet& x);
+
+/** \internal \returns cos(x) for single precision float */
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pcos_float(const Packet& x);
+
+/** \internal \returns sqrt(x) for complex types */
+template<typename Packet>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psqrt_complex(const Packet& a);
+
+template <typename Packet, int N> struct ppolevl;
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Half.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Half.h
new file mode 100644
index 000000000..9f8e8cc1e
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Half.h
@@ -0,0 +1,942 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+// The conversion routines are Copyright (c) Fabian Giesen, 2016.
+// The original license follows:
+//
+// Copyright (c) Fabian Giesen, 2016
+// All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted.
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Standard 16-bit float type, mostly useful for GPUs. Defines a new
+// type Eigen::half (inheriting either from CUDA's or HIP's __half struct) with
+// operator overloads such that it behaves basically as an arithmetic
+// type. It will be quite slow on CPUs (so it is recommended to stay
+// in fp32 for CPUs, except for simple parameter conversions, I/O
+// to disk and the like), but fast on GPUs.
+
+
+#ifndef EIGEN_HALF_H
+#define EIGEN_HALF_H
+
+#include <sstream>
+
+#if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+// When compiling with GPU support, the "__half_raw" base class as well as
+// some other routines are defined in the GPU compiler header files
+// (cuda_fp16.h, hip_fp16.h), and they are not tagged constexpr.
+// As a consequence, we get compile failures when compiling Eigen with
+// GPU support. Hence the need to disable EIGEN_CONSTEXPR when building
+// Eigen with GPU support.
+ #pragma push_macro("EIGEN_CONSTEXPR")
+ #undef EIGEN_CONSTEXPR
+ #define EIGEN_CONSTEXPR
+#endif
+
+#define F16_PACKET_FUNCTION(PACKET_F, PACKET_F16, METHOD) \
+ template <> \
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_UNUSED \
+ PACKET_F16 METHOD<PACKET_F16>(const PACKET_F16& _x) { \
+ return float2half(METHOD<PACKET_F>(half2float(_x))); \
+ }
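+// For example (illustrative, assuming the AVX types Packet8f/Packet8h),
+// F16_PACKET_FUNCTION(Packet8f, Packet8h, psin) defines psin<Packet8h> as:
+// widen to float, apply psin<Packet8f>, then narrow the result back to half.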
+
+namespace Eigen {
+
+struct half;
+
+namespace half_impl {
+
+// We want to use the __half_raw struct from the HIP header file only during the device compile phase.
+// This is required because of a quirk in the way TensorFlow GPU builds are done.
+// When compiling TensorFlow source code with GPU support, files that
+// * contain GPU kernels (i.e. *.cu.cc files) are compiled via hipcc
+// * do not contain GPU kernels ( i.e. *.cc files) are compiled via gcc (typically)
+//
+// Tensorflow uses the Eigen::half type as its FP16 type, and there are functions that
+// * are defined in a file that gets compiled via hipcc AND
+// * have Eigen::half as a pass-by-value argument AND
+// * are called in a file that gets compiled via gcc
+//
+// In the scenario described above the caller and callee will see different versions
+// of the Eigen::half base class __half_raw, and they will be compiled by different compilers
+//
+// There appears to be an ABI mismatch between gcc and clang (which is called by hipcc) that results in
+// the callee getting corrupted values for the Eigen::half argument.
+//
+// Making the host-side compile phase of hipcc use the same Eigen::half impl
+// as the gcc compile resolves this error, hence the following convoluted #if condition.
+#if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE)
+// Make our own __half_raw definition that is similar to CUDA's.
+struct __half_raw {
+#if (defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE))
+ // Eigen::half can be used as the datatype for shared memory declarations (in Eigen and TF)
+ // The element type for shared memory cannot have non-trivial constructors
+  // and hence the following special casing (which skips the zero-initialization).
+ // Note that this check gets done even in the host compilation phase, and
+ // hence the need for this
+ EIGEN_DEVICE_FUNC __half_raw() {}
+#else
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw() : x(0) {}
+#endif
+#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(numext::bit_cast<__fp16>(raw)) {
+ }
+ __fp16 x;
+#else
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(raw) {}
+ numext::uint16_t x;
+#endif
+};
+
+#elif defined(EIGEN_HAS_HIP_FP16)
+ // Nothing to do here
+ // HIP fp16 header file has a definition for __half_raw
+#elif defined(EIGEN_HAS_CUDA_FP16)
+ #if EIGEN_CUDA_SDK_VER < 90000
+ // In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw
+ typedef __half __half_raw;
+  #endif // EIGEN_CUDA_SDK_VER < 90000
+#elif defined(SYCL_DEVICE_ONLY)
+ typedef cl::sycl::half __half_raw;
+#endif
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff);
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);
+
+struct half_base : public __half_raw {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base() {}
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half_raw& h) : __half_raw(h) {}
+
+#if defined(EIGEN_HAS_GPU_FP16)
+ #if defined(EIGEN_HAS_HIP_FP16)
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) { x = __half_as_ushort(h); }
+ #elif defined(EIGEN_HAS_CUDA_FP16)
+ #if EIGEN_CUDA_SDK_VER >= 90000
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {}
+ #endif
+ #endif
+#endif
+};
+
+} // namespace half_impl
+
+// Class definition.
+struct half : public half_impl::half_base {
+
+ // Writing this out as separate #if-else blocks to make the code easier to follow
+ // The same applies to most #if-else blocks in this file
+#if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE)
+ // Use the same base class for the following two scenarios
+ // * when compiling without GPU support enabled
+ // * during host compile phase when compiling with GPU support enabled
+ typedef half_impl::__half_raw __half_raw;
+#elif defined(EIGEN_HAS_HIP_FP16)
+ // Nothing to do here
+ // HIP fp16 header file has a definition for __half_raw
+#elif defined(EIGEN_HAS_CUDA_FP16)
+ // Note that EIGEN_CUDA_SDK_VER is set to 0 even when compiling with HIP, so
+ // (EIGEN_CUDA_SDK_VER < 90000) is true even for HIP! So keeping this within
+  // #if defined(EIGEN_HAS_CUDA_FP16) is needed.
+ #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
+ typedef half_impl::__half_raw __half_raw;
+ #endif
+#endif
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half_raw& h) : half_impl::half_base(h) {}
+
+#if defined(EIGEN_HAS_GPU_FP16)
+ #if defined(EIGEN_HAS_HIP_FP16)
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {}
+ #elif defined(EIGEN_HAS_CUDA_FP16)
+ #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {}
+ #endif
+ #endif
+#endif
+
+
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(bool b)
+ : half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
+ template<class T>
+ explicit EIGEN_DEVICE_FUNC half(T val)
+ : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(val))) {}
+ explicit EIGEN_DEVICE_FUNC half(float f)
+ : half_impl::half_base(half_impl::float_to_half_rtne(f)) {}
+
+  // Following the convention of numpy, converting from complex to
+  // float discards the imaginary part.
+ template<typename RealScalar>
+ explicit EIGEN_DEVICE_FUNC half(std::complex<RealScalar> c)
+ : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(c.real()))) {}
+
+ EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless.
+ return half_impl::half_to_float(*this);
+ }
+
+#if defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE)
+ EIGEN_DEVICE_FUNC operator __half() const {
+ ::__half_raw hr;
+ hr.x = x;
+ return __half(hr);
+ }
+#endif
+};
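+// Example usage (illustrative):
+//   Eigen::half h(1.5f);   // stored as the bit pattern 0x3E00
+//   float f = h;           // implicit, lossless widening back to 1.5f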
+
+} // end namespace Eigen
+
+namespace std {
+template<>
+struct numeric_limits<Eigen::half> {
+ static const bool is_specialized = true;
+ static const bool is_signed = true;
+ static const bool is_integer = false;
+ static const bool is_exact = false;
+ static const bool has_infinity = true;
+ static const bool has_quiet_NaN = true;
+ static const bool has_signaling_NaN = true;
+ static const float_denorm_style has_denorm = denorm_present;
+ static const bool has_denorm_loss = false;
+ static const std::float_round_style round_style = std::round_to_nearest;
+ static const bool is_iec559 = false;
+ static const bool is_bounded = false;
+ static const bool is_modulo = false;
+ static const int digits = 11;
+ static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
+ static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
+ static const int radix = 2;
+ static const int min_exponent = -13;
+ static const int min_exponent10 = -4;
+ static const int max_exponent = 16;
+ static const int max_exponent10 = 4;
+ static const bool traps = true;
+ static const bool tinyness_before = false;
+
+ static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); }
+ static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); }
+ static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); }
+ static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x0800); }
+ static Eigen::half round_error() { return Eigen::half(0.5); }
+ static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); }
+ static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
+ static Eigen::half signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7d00); }
+ static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); }
+};
+
+// If std::numeric_limits<T> is specialized, we should also specialize
+// std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
+// std::numeric_limits<const volatile T>.
+// https://stackoverflow.com/a/16519653/
+template<>
+struct numeric_limits<const Eigen::half> : numeric_limits<Eigen::half> {};
+template<>
+struct numeric_limits<volatile Eigen::half> : numeric_limits<Eigen::half> {};
+template<>
+struct numeric_limits<const volatile Eigen::half> : numeric_limits<Eigen::half> {};
+} // end namespace std
+
+namespace Eigen {
+
+namespace half_impl {
+
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && \
+ EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE))
+// Note: We deliberately do *not* define this to 1 even if we have Arm's native
+// fp16 type, since GPU halfs are rather different from native CPU halfs.
+// TODO: Rename to something like EIGEN_HAS_NATIVE_GPU_FP16
+#define EIGEN_HAS_NATIVE_FP16
+#endif
+
+// Intrinsics for native fp16 support. Note that on current hardware,
+// these are no faster than fp32 arithmetic (you need to use the half2
+// versions to benefit from the increased ALU throughput), but you do save
+// the conversion steps back and forth.
+
+#if defined(EIGEN_HAS_NATIVE_FP16)
+EIGEN_STRONG_INLINE __device__ half operator + (const half& a, const half& b) {
+#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
+ return __hadd(::__half(a), ::__half(b));
+#else
+ return __hadd(a, b);
+#endif
+}
+EIGEN_STRONG_INLINE __device__ half operator * (const half& a, const half& b) {
+ return __hmul(a, b);
+}
+EIGEN_STRONG_INLINE __device__ half operator - (const half& a, const half& b) {
+ return __hsub(a, b);
+}
+EIGEN_STRONG_INLINE __device__ half operator / (const half& a, const half& b) {
+#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
+ return __hdiv(a, b);
+#else
+ float num = __half2float(a);
+ float denom = __half2float(b);
+ return __float2half(num / denom);
+#endif
+}
+EIGEN_STRONG_INLINE __device__ half operator - (const half& a) {
+ return __hneg(a);
+}
+EIGEN_STRONG_INLINE __device__ half& operator += (half& a, const half& b) {
+ a = a + b;
+ return a;
+}
+EIGEN_STRONG_INLINE __device__ half& operator *= (half& a, const half& b) {
+ a = a * b;
+ return a;
+}
+EIGEN_STRONG_INLINE __device__ half& operator -= (half& a, const half& b) {
+ a = a - b;
+ return a;
+}
+EIGEN_STRONG_INLINE __device__ half& operator /= (half& a, const half& b) {
+ a = a / b;
+ return a;
+}
+EIGEN_STRONG_INLINE __device__ bool operator == (const half& a, const half& b) {
+ return __heq(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator != (const half& a, const half& b) {
+ return __hne(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator < (const half& a, const half& b) {
+ return __hlt(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator <= (const half& a, const half& b) {
+ return __hle(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator > (const half& a, const half& b) {
+ return __hgt(a, b);
+}
+EIGEN_STRONG_INLINE __device__ bool operator >= (const half& a, const half& b) {
+ return __hge(a, b);
+}
+#endif
+
+#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
+ return half(vaddh_f16(a.x, b.x));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
+ return half(vmulh_f16(a.x, b.x));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
+ return half(vsubh_f16(a.x, b.x));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
+ return half(vdivh_f16(a.x, b.x));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
+ return half(vnegh_f16(a.x));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
+ a = half(vaddh_f16(a.x, b.x));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
+ a = half(vmulh_f16(a.x, b.x));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
+ a = half(vsubh_f16(a.x, b.x));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
+ a = half(vdivh_f16(a.x, b.x));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
+ return vceqh_f16(a.x, b.x);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
+ return !vceqh_f16(a.x, b.x);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
+ return vclth_f16(a.x, b.x);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
+ return vcleh_f16(a.x, b.x);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
+ return vcgth_f16(a.x, b.x);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
+ return vcgeh_f16(a.x, b.x);
+}
+// We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler,
+// invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation
+// of the functions, while the latter can only deal with one of them.
+#elif !defined(EIGEN_HAS_NATIVE_FP16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for half floats
+
+#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
+// We need to provide emulated *host-side* FP16 operators for clang.
+#pragma push_macro("EIGEN_DEVICE_FUNC")
+#undef EIGEN_DEVICE_FUNC
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_HAS_NATIVE_FP16)
+#define EIGEN_DEVICE_FUNC __host__
+#else // both host and device need emulated ops.
+#define EIGEN_DEVICE_FUNC __host__ __device__
+#endif
+#endif
+
+// Definitions for CPUs and older HIP+CUDA, mostly working through conversion
+// to/from fp32.
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
+ return half(float(a) + float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
+ return half(float(a) * float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
+ return half(float(a) - float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
+ return half(float(a) / float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
+ half result;
+ result.x = a.x ^ 0x8000;
+ return result;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
+ a = half(float(a) + float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
+ a = half(float(a) * float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
+ a = half(float(a) - float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
+ a = half(float(a) / float(b));
+ return a;
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
+ return numext::equal_strict(float(a),float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
+ return numext::not_equal_strict(float(a), float(b));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
+ return float(a) < float(b);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
+ return float(a) <= float(b);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
+ return float(a) > float(b);
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
+ return float(a) >= float(b);
+}
+
+#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
+#pragma pop_macro("EIGEN_DEVICE_FUNC")
+#endif
+#endif // Emulate support for half floats
+
+// Division by an index. Do it in full float precision to avoid accuracy
+// issues in converting the denominator to half.
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) {
+ return half(static_cast<float>(a) / static_cast<float>(b));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a) {
+ a += half(1);
+ return a;
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a) {
+ a -= half(1);
+ return a;
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a, int) {
+ half original_value = a;
+ ++a;
+ return original_value;
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a, int) {
+ half original_value = a;
+ --a;
+ return original_value;
+}
+
+// Conversion routines, including fallbacks for the host or older CUDA.
+// Note that newer Intel CPUs (Haswell or newer) have vectorized versions of
+// these in hardware. If we need more performance on older/other CPUs, these
+// conversions could also be vectorized directly.
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x) {
+  // We cannot simply do a "return __half_raw(x)" here, because __half_raw is a union type
+  // in the hip_fp16 header file, and that will trigger a compile error.
+  // On the other hand, having anything but a return statement also triggers a compile error
+  // because this is a constexpr function.
+  // Fortunately, since we need to disable EIGEN_CONSTEXPR for GPU anyway, we can get out
+  // of this catch-22 by having separate bodies for GPU / non GPU.
+#if defined(EIGEN_HAS_GPU_FP16)
+ __half_raw h;
+ h.x = x;
+ return h;
+#else
+ return __half_raw(x);
+#endif
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC numext::uint16_t raw_half_as_uint16(const __half_raw& h) {
+ // HIP/CUDA/Default have a member 'x' of type uint16_t.
+ // For ARM64 native half, the member 'x' is of type __fp16, so we need to bit-cast.
+ // For SYCL, cl::sycl::half is _Float16, so cast directly.
+#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ return numext::bit_cast<numext::uint16_t>(h.x);
+#elif defined(SYCL_DEVICE_ONLY)
+ return numext::bit_cast<numext::uint16_t>(h);
+#else
+ return h.x;
+#endif
+}
+
+union float32_bits {
+ unsigned int u;
+ float f;
+};
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ __half tmp_ff = __float2half(ff);
+ return *(__half_raw*)&tmp_ff;
+
+#elif defined(EIGEN_HAS_FP16_C)
+ __half_raw h;
+ h.x = _cvtss_sh(ff, 0);
+ return h;
+
+#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ __half_raw h;
+ h.x = static_cast<__fp16>(ff);
+ return h;
+
+#else
+ float32_bits f; f.f = ff;
+
+ const float32_bits f32infty = { 255 << 23 };
+ const float32_bits f16max = { (127 + 16) << 23 };
+ const float32_bits denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
+ unsigned int sign_mask = 0x80000000u;
+ __half_raw o;
+ o.x = static_cast<numext::uint16_t>(0x0u);
+
+ unsigned int sign = f.u & sign_mask;
+ f.u ^= sign;
+
+ // NOTE all the integer compares in this function can be safely
+ // compiled into signed compares since all operands are below
+ // 0x80000000. Important if you want fast straight SSE2 code
+ // (since there's no unsigned PCMPGTD).
+
+ if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set)
+ o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
+ } else { // (De)normalized number or zero
+ if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero
+ // use a magic value to align our 10 mantissa bits at the bottom of
+ // the float. as long as FP addition is round-to-nearest-even this
+ // just works.
+ f.f += denorm_magic.f;
+
+ // and one integer subtract of the bias later, we have our final float!
+ o.x = static_cast<numext::uint16_t>(f.u - denorm_magic.u);
+ } else {
+ unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
+
+ // update exponent, rounding bias part 1
+ // Equivalent to `f.u += ((unsigned int)(15 - 127) << 23) + 0xfff`, but
+ // without arithmetic overflow.
+ f.u += 0xc8000fffU;
+ // rounding bias part 2
+ f.u += mant_odd;
+ // take the bits!
+ o.x = static_cast<numext::uint16_t>(f.u >> 13);
+ }
+ }
+
+ o.x |= static_cast<numext::uint16_t>(sign >> 16);
+ return o;
+#endif
+}
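+// Worked example (software path above): ff = 1.5f has bits 0x3FC00000; it is
+// neither Inf/NaN nor subnormal, mant_odd = 0, and after the two bias
+// adjustments f.u = 0x07C00FFF, so o.x = f.u >> 13 = 0x3E00, the half
+// encoding of 1.5.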
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __half2float(h);
+#elif defined(EIGEN_HAS_FP16_C)
+ return _cvtsh_ss(h.x);
+#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ return static_cast<float>(h.x);
+#else
+ const float32_bits magic = { 113 << 23 };
+ const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
+ float32_bits o;
+
+ o.u = (h.x & 0x7fff) << 13; // exponent/mantissa bits
+ unsigned int exp = shifted_exp & o.u; // just the exponent
+ o.u += (127 - 15) << 23; // exponent adjust
+
+ // handle exponent special cases
+ if (exp == shifted_exp) { // Inf/NaN?
+ o.u += (128 - 16) << 23; // extra exp adjust
+ } else if (exp == 0) { // Zero/Denormal?
+ o.u += 1 << 23; // extra exp adjust
+ o.f -= magic.f; // renormalize
+ }
+
+ o.u |= (h.x & 0x8000) << 16; // sign bit
+ return o.f;
+#endif
+}
+
+// --- standard functions ---
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) {
+#ifdef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC
+ return (numext::bit_cast<numext::uint16_t>(a.x) & 0x7fff) == 0x7c00;
+#else
+ return (a.x & 0x7fff) == 0x7c00;
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __hisnan(a);
+#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ return (numext::bit_cast<numext::uint16_t>(a.x) & 0x7fff) > 0x7c00;
+#else
+ return (a.x & 0x7fff) > 0x7c00;
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) {
+ return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) {
+#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ return half(vabsh_f16(a.x));
+#else
+ half result;
+ result.x = a.x & 0x7FFF;
+ return result;
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) {
+#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hexp(a));
+#else
+ return half(::expf(float(a)));
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) {
+ return half(numext::expm1(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return half(::hlog(a));
+#else
+ return half(::logf(float(a)));
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) {
+ return half(numext::log1p(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) {
+ return half(::log10f(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log2(const half& a) {
+ return half(static_cast<float>(EIGEN_LOG2E) * ::logf(float(a)));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) {
+#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hsqrt(a));
+#else
+ return half(::sqrtf(float(a)));
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) {
+ return half(::powf(float(a), float(b)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) {
+ return half(::sinf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) {
+ return half(::cosf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tan(const half& a) {
+ return half(::tanf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) {
+ return half(::tanhf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half asin(const half& a) {
+ return half(::asinf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half acos(const half& a) {
+ return half(::acosf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) {
+#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hfloor(a));
+#else
+ return half(::floorf(float(a)));
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {
+#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hceil(a));
+#else
+ return half(::ceilf(float(a)));
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half rint(const half& a) {
+ return half(::rintf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half round(const half& a) {
+ return half(::roundf(float(a)));
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half fmod(const half& a, const half& b) {
+ return half(::fmodf(float(a), float(b)));
+}
+
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __hlt(b, a) ? b : a;
+#else
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f2 < f1 ? b : a;
+#endif
+}
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) {
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __hlt(a, b) ? b : a;
+#else
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f1 < f2 ? b : a;
+#endif
+}
+
+#ifndef EIGEN_NO_IO
+EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) {
+ os << static_cast<float>(v);
+ return os;
+}
+#endif
+
+} // end namespace half_impl
+
+// import Eigen::half_impl::half into Eigen namespace
+// using half_impl::half;
+
+namespace internal {
+
+template<>
+struct random_default_impl<half, false, false>
+{
+ static inline half run(const half& x, const half& y)
+ {
+ return x + (y-x) * half(float(std::rand()) / float(RAND_MAX));
+ }
+ static inline half run()
+ {
+ return run(half(-1.f), half(1.f));
+ }
+};
+
+template<> struct is_arithmetic<half> { enum { value = true }; };
+
+} // end namespace internal
+
+template<> struct NumTraits<Eigen::half>
+ : GenericNumTraits<Eigen::half>
+{
+ enum {
+ IsSigned = true,
+ IsInteger = false,
+ IsComplex = false,
+ RequireInitialization = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half epsilon() {
+ return half_impl::raw_uint16_to_half(0x0800);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half dummy_precision() {
+ return half_impl::raw_uint16_to_half(0x211f); // Eigen::half(1e-2f);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half highest() {
+ return half_impl::raw_uint16_to_half(0x7bff);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half lowest() {
+ return half_impl::raw_uint16_to_half(0xfbff);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half infinity() {
+ return half_impl::raw_uint16_to_half(0x7c00);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() {
+ return half_impl::raw_uint16_to_half(0x7e00);
+ }
+};
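+// For reference, the raw constants above decode as: 0x0800 = 2^-13,
+// 0x211f ~ 1.0002e-2, 0x7bff = 65504 (largest finite half), 0xfbff = -65504,
+// 0x7c00 = +infinity, and 0x7e00 = a quiet NaN.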
+
+} // end namespace Eigen
+
+#if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ #pragma pop_macro("EIGEN_CONSTEXPR")
+#endif
+
+namespace Eigen {
+namespace numext {
+
+#if defined(EIGEN_GPU_COMPILE_PHASE)
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isnan)(const Eigen::half& h) {
+ return (half_impl::isnan)(h);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isinf)(const Eigen::half& h) {
+ return (half_impl::isinf)(h);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isfinite)(const Eigen::half& h) {
+ return (half_impl::isfinite)(h);
+}
+
+#endif
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bit_cast<Eigen::half, uint16_t>(const uint16_t& src) {
+ return Eigen::half(Eigen::half_impl::raw_uint16_to_half(src));
+}
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast<uint16_t, Eigen::half>(const Eigen::half& src) {
+ return Eigen::half_impl::raw_half_as_uint16(src);
+}
+
+} // namespace numext
+} // namespace Eigen
+
+// Add the missing shfl* intrinsics.
+// The __shfl* functions are only valid on HIP or __CUDA_ARCH__ >= 300.
+// CUDA defines them for (__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__))
+//
+// HIP and CUDA prior to SDK 9.0 define
+// __shfl, __shfl_up, __shfl_down, __shfl_xor for int and float
+// CUDA since 9.0 deprecates those and instead defines
+// __shfl_sync, __shfl_up_sync, __shfl_down_sync, __shfl_xor_sync,
+// with native support for __half and __nv_bfloat16
+//
+// Note that the following are __device__-only functions.
+#if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 300)) \
+ || defined(EIGEN_HIPCC)
+
+#if defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 90000
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_sync(unsigned mask, Eigen::half var, int srcLane, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_sync(mask, h, srcLane, width));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_up_sync(mask, h, delta, width));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_down_sync(mask, h, delta, width));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor_sync(unsigned mask, Eigen::half var, int laneMask, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_xor_sync(mask, h, laneMask, width));
+}
+
+#else // HIP or CUDA SDK < 9.0
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl(Eigen::half var, int srcLane, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl(ivar, srcLane, width)));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up(Eigen::half var, unsigned int delta, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_up(ivar, delta, width)));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down(Eigen::half var, unsigned int delta, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_down(ivar, delta, width)));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_xor(ivar, laneMask, width)));
+}
+
+#endif // HIP vs CUDA
+#endif // __shfl*
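+// With the overloads above, warp-level reductions can stay in half precision.
+// A minimal sketch (illustrative only; assumes CUDA 9.0+ and a full,
+// converged warp):
+//
+//   __device__ Eigen::half warp_sum(Eigen::half v) {
+//     for (int offset = warpSize / 2; offset > 0; offset /= 2)
+//       v = v + __shfl_down_sync(0xffffffffu, v, offset);
+//     return v;
+//   }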
+
+// ldg() has an overload for __half_raw, but we also need one for Eigen::half.
+#if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 350)) \
+ || defined(EIGEN_HIPCC)
+EIGEN_STRONG_INLINE __device__ Eigen::half __ldg(const Eigen::half* ptr) {
+ return Eigen::half_impl::raw_uint16_to_half(__ldg(reinterpret_cast<const Eigen::numext::uint16_t*>(ptr)));
+}
+#endif // __ldg
+
+#if EIGEN_HAS_STD_HASH
+namespace std {
+template <>
+struct hash<Eigen::half> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const {
+ return static_cast<std::size_t>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(a));
+ }
+};
+} // end namespace std
+#endif
+
+#endif // EIGEN_HALF_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Settings.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Settings.h
index 097373c84..a5c3ada4c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Settings.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/Settings.h
@@ -21,7 +21,7 @@
* it does not correspond to the number of iterations or the number of instructions
*/
#ifndef EIGEN_UNROLLING_LIMIT
-#define EIGEN_UNROLLING_LIMIT 100
+#define EIGEN_UNROLLING_LIMIT 110
#endif
/** Defines the threshold between a "small" and a "large" matrix.
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/TypeCasting.h
new file mode 100644
index 000000000..fb8183b78
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/Default/TypeCasting.h
@@ -0,0 +1,120 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2019 Rasmus Munk Larsen <rmlarsen@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERIC_TYPE_CASTING_H
+#define EIGEN_GENERIC_TYPE_CASTING_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<>
+struct scalar_cast_op<float, Eigen::half> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::half result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __float2half(a);
+ #else
+ return Eigen::half(a);
+ #endif
+ }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<float, Eigen::half> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
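+// This functor backs expressions such as m.cast<Eigen::half>(); a minimal
+// usage sketch:
+//
+//   Eigen::MatrixXf mf = Eigen::MatrixXf::Random(3, 3);
+//   Eigen::Matrix<Eigen::half, Eigen::Dynamic, Eigen::Dynamic> mh =
+//       mf.cast<Eigen::half>();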
+
+
+template<>
+struct scalar_cast_op<int, Eigen::half> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::half result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __float2half(static_cast<float>(a));
+ #else
+ return Eigen::half(static_cast<float>(a));
+ #endif
+ }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<int, Eigen::half> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+template<>
+struct scalar_cast_op<Eigen::half, float> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef float result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __half2float(a);
+ #else
+ return static_cast<float>(a);
+ #endif
+ }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<Eigen::half, float> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+template<>
+struct scalar_cast_op<float, Eigen::bfloat16> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::bfloat16 result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::bfloat16 operator() (const float& a) const {
+ return Eigen::bfloat16(a);
+ }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<float, Eigen::bfloat16> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+template<>
+struct scalar_cast_op<int, Eigen::bfloat16> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::bfloat16 result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::bfloat16 operator() (const int& a) const {
+ return Eigen::bfloat16(static_cast<float>(a));
+ }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<int, Eigen::bfloat16> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+template<>
+struct scalar_cast_op<Eigen::bfloat16, float> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef float result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::bfloat16& a) const {
+ return static_cast<float>(a);
+ }
+};
+
+template<>
+struct functor_traits<scalar_cast_op<Eigen::bfloat16, float> >
+{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_GENERIC_TYPE_CASTING_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/MathFunctions.h
index ff6256ce0..d2b3a2568 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/CUDA/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/MathFunctions.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_MATH_FUNCTIONS_CUDA_H
-#define EIGEN_MATH_FUNCTIONS_CUDA_H
+#ifndef EIGEN_MATH_FUNCTIONS_GPU_H
+#define EIGEN_MATH_FUNCTIONS_GPU_H
namespace Eigen {
@@ -17,7 +17,7 @@ namespace internal {
// Make sure this is only available when targeting a GPU: we don't want to
// introduce conflicts between these packet_traits definitions and the ones
// we'll use on the host side (SSE, AVX, ...)
-#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float4 plog<float4>(const float4& a)
{
@@ -100,4 +100,4 @@ double2 prsqrt<double2>(const double2& a)
} // end namespace Eigen
-#endif // EIGEN_MATH_FUNCTIONS_CUDA_H
+#endif // EIGEN_MATH_FUNCTIONS_GPU_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/PacketMath.h
new file mode 100644
index 000000000..25c45fd35
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/PacketMath.h
@@ -0,0 +1,1649 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_GPU_H
+#define EIGEN_PACKET_MATH_GPU_H
+
+namespace Eigen {
+
+namespace internal {
+
+// Read-only data cached load available.
+#if defined(EIGEN_HIP_DEVICE_COMPILE) || (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350)
+#define EIGEN_GPU_HAS_LDG 1
+#endif
+
+// FP16 math available.
+#if (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530)
+#define EIGEN_CUDA_HAS_FP16_ARITHMETIC 1
+#endif
+
+#if defined(EIGEN_HIP_DEVICE_COMPILE) || defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
+#define EIGEN_GPU_HAS_FP16_ARITHMETIC 1
+#endif
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
+
+template<> struct is_arithmetic<float4> { enum { value = true }; };
+template<> struct is_arithmetic<double2> { enum { value = true }; };
+
+template<> struct packet_traits<float> : default_packet_traits
+{
+ typedef float4 type;
+ typedef float4 half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4,
+ HasHalfPacket = 0,
+
+ HasDiv = 1,
+ HasSin = 0,
+ HasCos = 0,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasLGamma = 1,
+ HasDiGamma = 1,
+ HasZeta = 1,
+ HasPolygamma = 1,
+ HasErf = 1,
+ HasErfc = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
+ HasIGamma = 1,
+ HasIGammaDerA = 1,
+ HasGammaSampleDerAlpha = 1,
+ HasIGammac = 1,
+ HasBetaInc = 1,
+
+ HasBlend = 0,
+ HasFloor = 1,
+ };
+};
+
+template<> struct packet_traits<double> : default_packet_traits
+{
+ typedef double2 type;
+ typedef double2 half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=2,
+ HasHalfPacket = 0,
+
+ HasDiv = 1,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasLGamma = 1,
+ HasDiGamma = 1,
+ HasZeta = 1,
+ HasPolygamma = 1,
+ HasErf = 1,
+ HasErfc = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
+ HasIGamma = 1,
+ HasIGammaDerA = 1,
+ HasGammaSampleDerAlpha = 1,
+ HasIGammac = 1,
+ HasBetaInc = 1,
+
+ HasBlend = 0,
+ HasFloor = 1,
+ };
+};
+
+
+template<> struct unpacket_traits<float4> { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef float4 half; };
+template<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef double2 half; };
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1<float4>(const float& from) {
+ return make_float4(from, from, from, from);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1<double2>(const double& from) {
+ return make_double2(from, from);
+}
+
+// We need to distinguish 'clang as the CUDA compiler' from 'clang as the host
+// compiler, invoked by NVCC' (e.g. on MacOS). The former needs to see both host
+// and device implementations of the functions, while the latter can only deal
+// with one of them.
+#if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC) && EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC)
+namespace {
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_and(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) & __float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_and(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) &
+ __double_as_longlong(b));
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_or(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) | __float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_or(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) |
+ __double_as_longlong(b));
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_xor(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) ^ __float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_xor(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) ^
+ __double_as_longlong(b));
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_andnot(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) & ~__float_as_int(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_andnot(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) &
+ ~__double_as_longlong(b));
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float eq_mask(const float& a,
+ const float& b) {
+ return __int_as_float(a == b ? 0xffffffffu : 0u);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double eq_mask(const double& a,
+ const double& b) {
+ return __longlong_as_double(a == b ? 0xffffffffffffffffull : 0ull);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float lt_mask(const float& a,
+ const float& b) {
+ return __int_as_float(a < b ? 0xffffffffu : 0u);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double lt_mask(const double& a,
+ const double& b) {
+ return __longlong_as_double(a < b ? 0xffffffffffffffffull : 0ull);
+}
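+// Note: the all-ones results above are NaN bit patterns when viewed as
+// floating point; they are only consumed bitwise, as masks for the packet
+// ops below, never arithmetically.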
+
+} // namespace
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pand<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y),
+ bitwise_and(a.z, b.z), bitwise_and(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pand<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 por<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y),
+ bitwise_or(a.z, b.z), bitwise_or(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 por<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pxor<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y),
+ bitwise_xor(a.z, b.z), bitwise_xor(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pxor<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pandnot<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y),
+ bitwise_andnot(a.z, b.z), bitwise_andnot(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pandnot<double2>(const double2& a, const double2& b) {
+ return make_double2(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_eq<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(eq_mask(a.x, b.x), eq_mask(a.y, b.y), eq_mask(a.z, b.z),
+ eq_mask(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_lt<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(lt_mask(a.x, b.x), lt_mask(a.y, b.y), lt_mask(a.z, b.z),
+ lt_mask(a.w, b.w));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pcmp_eq<double2>(const double2& a, const double2& b) {
+ return make_double2(eq_mask(a.x, b.x), eq_mask(a.y, b.y));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+pcmp_lt<double2>(const double2& a, const double2& b) {
+ return make_double2(lt_mask(a.x, b.x), lt_mask(a.y, b.y));
+}
+#endif // defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC) && EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC)
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset<float4>(const float& a) {
+ return make_float4(a, a+1, a+2, a+3);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset<double2>(const double& a) {
+ return make_double2(a, a+1);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x+b.x, a.y+b.y);
+}
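+// These primitives compose in the usual packet style; a small sketch:
+//
+//   float4 p = pset1<float4>(1.5f);  // {1.5, 1.5, 1.5, 1.5}
+//   float4 q = plset<float4>(0.0f);  // {0.0, 1.0, 2.0, 3.0}
+//   float4 s = padd(p, q);           // {1.5, 2.5, 3.5, 4.5}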
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x-b.x, a.y-b.y);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) {
+ return make_float4(-a.x, -a.y, -a.z, -a.w);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) {
+ return make_double2(-a.x, -a.y);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmul<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x*b.x, a.y*b.y);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x/b.x, a.y/b.y);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmin<float4>(const float4& a, const float4& b) {
+ return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w));
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin<double2>(const double2& a, const double2& b) {
+ return make_double2(fmin(a.x, b.x), fmin(a.y, b.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax<float4>(const float4& a, const float4& b) {
+ return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w));
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax<double2>(const double2& a, const double2& b) {
+ return make_double2(fmax(a.x, b.x), fmax(a.y, b.y));
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload<float4>(const float* from) {
+ return *reinterpret_cast<const float4*>(from);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload<double2>(const double* from) {
+ return *reinterpret_cast<const double2*>(from);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu<float4>(const float* from) {
+ return make_float4(from[0], from[1], from[2], from[3]);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const double* from) {
+ return make_double2(from[0], from[1]);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
+ return make_float4(from[0], from[0], from[1], from[1]);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
+ return make_double2(from[0], from[0]);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<float>(float* to, const float4& from) {
+ *reinterpret_cast<float4*>(to) = from;
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<double>(double* to, const double2& from) {
+ *reinterpret_cast<double2*>(to) = from;
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const float4& from) {
+ to[0] = from.x;
+ to[1] = from.y;
+ to[2] = from.z;
+ to[3] = from.w;
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const double2& from) {
+ to[0] = from.x;
+ to[1] = from.y;
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const float* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ return __ldg((const float4*)from);
+#else
+ return make_float4(from[0], from[1], from[2], from[3]);
+#endif
+}
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const double* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ return __ldg((const double2*)from);
+#else
+ return make_double2(from[0], from[1]);
+#endif
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const float* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3));
+#else
+ return make_float4(from[0], from[1], from[2], from[3]);
+#endif
+}
+template<>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Unaligned>(const double* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ return make_double2(__ldg(from+0), __ldg(from+1));
+#else
+ return make_double2(from[0], from[1]);
+#endif
+}
+
+template<> EIGEN_DEVICE_FUNC inline float4 pgather<float, float4>(const float* from, Index stride) {
+ return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]);
+}
+
+template<> EIGEN_DEVICE_FUNC inline double2 pgather<double, double2>(const double* from, Index stride) {
+ return make_double2(from[0*stride], from[1*stride]);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, float4>(float* to, const float4& from, Index stride) {
+ to[stride*0] = from.x;
+ to[stride*1] = from.y;
+ to[stride*2] = from.z;
+ to[stride*3] = from.w;
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, double2>(double* to, const double2& from, Index stride) {
+ to[stride*0] = from.x;
+ to[stride*1] = from.y;
+}
+
+template<> EIGEN_DEVICE_FUNC inline float pfirst<float4>(const float4& a) {
+ return a.x;
+}
+template<> EIGEN_DEVICE_FUNC inline double pfirst<double2>(const double2& a) {
+ return a.x;
+}
+
+template<> EIGEN_DEVICE_FUNC inline float predux<float4>(const float4& a) {
+ return a.x + a.y + a.z + a.w;
+}
+template<> EIGEN_DEVICE_FUNC inline double predux<double2>(const double2& a) {
+ return a.x + a.y;
+}
+
+template<> EIGEN_DEVICE_FUNC inline float predux_max<float4>(const float4& a) {
+ return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w));
+}
+template<> EIGEN_DEVICE_FUNC inline double predux_max<double2>(const double2& a) {
+ return fmax(a.x, a.y);
+}
+
+template<> EIGEN_DEVICE_FUNC inline float predux_min<float4>(const float4& a) {
+ return fminf(fminf(a.x, a.y), fminf(a.z, a.w));
+}
+template<> EIGEN_DEVICE_FUNC inline double predux_min<double2>(const double2& a) {
+ return fmin(a.x, a.y);
+}
+
+template<> EIGEN_DEVICE_FUNC inline float predux_mul<float4>(const float4& a) {
+ return a.x * a.y * a.z * a.w;
+}
+template<> EIGEN_DEVICE_FUNC inline double predux_mul<double2>(const double2& a) {
+ return a.x * a.y;
+}
+
+template<> EIGEN_DEVICE_FUNC inline float4 pabs<float4>(const float4& a) {
+ return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w));
+}
+template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {
+ return make_double2(fabs(a.x), fabs(a.y));
+}
+
+template<> EIGEN_DEVICE_FUNC inline float4 pfloor<float4>(const float4& a) {
+ return make_float4(floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w));
+}
+template<> EIGEN_DEVICE_FUNC inline double2 pfloor<double2>(const double2& a) {
+ return make_double2(floor(a.x), floor(a.y));
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<float4,4>& kernel) {
+ float tmp = kernel.packet[0].y;
+ kernel.packet[0].y = kernel.packet[1].x;
+ kernel.packet[1].x = tmp;
+
+ tmp = kernel.packet[0].z;
+ kernel.packet[0].z = kernel.packet[2].x;
+ kernel.packet[2].x = tmp;
+
+ tmp = kernel.packet[0].w;
+ kernel.packet[0].w = kernel.packet[3].x;
+ kernel.packet[3].x = tmp;
+
+ tmp = kernel.packet[1].z;
+ kernel.packet[1].z = kernel.packet[2].y;
+ kernel.packet[2].y = tmp;
+
+ tmp = kernel.packet[1].w;
+ kernel.packet[1].w = kernel.packet[3].y;
+ kernel.packet[3].y = tmp;
+
+ tmp = kernel.packet[2].w;
+ kernel.packet[2].w = kernel.packet[3].z;
+ kernel.packet[3].z = tmp;
+}
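+// The six swaps above form an in-place 4x4 transpose: component c of
+// kernel.packet[r] is exchanged with component r of kernel.packet[c] for
+// every pair r < c.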
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<double2,2>& kernel) {
+ double tmp = kernel.packet[0].y;
+ kernel.packet[0].y = kernel.packet[1].x;
+ kernel.packet[1].x = tmp;
+}
+
+#endif // defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
+
+// Half-packet functions are not available on the host for CUDA 9.0-9.2, only
+// on device. There is no benefit to using them on the host anyway, since they are
+// emulated.
+#if (defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)) && defined(EIGEN_GPU_COMPILE_PHASE)
+
+typedef ulonglong2 Packet4h2;
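+// Packet4h2 packs eight halves into one ulonglong2; the code below treats it
+// as an array of four half2 lanes via reinterpret_cast.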
+template<> struct unpacket_traits<Packet4h2> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h2 half; };
+template<> struct is_arithmetic<Packet4h2> { enum { value = true }; };
+
+template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef half2 half; };
+template<> struct is_arithmetic<half2> { enum { value = true }; };
+
+template<> struct packet_traits<Eigen::half> : default_packet_traits
+{
+ typedef Packet4h2 type;
+ typedef Packet4h2 half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=8,
+ HasHalfPacket = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasExp = 1,
+ HasExpm1 = 1,
+ HasLog = 1,
+ HasLog1p = 1
+ };
+};
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
+ return __half2half2(from);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pset1<Packet4h2>(const Eigen::half& from) {
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = pset1<half2>(from);
+ p_alias[1] = pset1<half2>(from);
+ p_alias[2] = pset1<half2>(from);
+ p_alias[3] = pset1<half2>(from);
+ return r;
+}
+
+namespace {
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pload(const Eigen::half* from) {
+ return *reinterpret_cast<const half2*>(from);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploadu(const Eigen::half* from) {
+ return __halves2half2(from[0], from[1]);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploaddup(const Eigen::half* from) {
+ return __halves2half2(from[0], from[0]);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(Eigen::half* to,
+ const half2& from) {
+ *reinterpret_cast<half2*>(to) = from;
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to,
+ const half2& from) {
+ to[0] = __low2half(from);
+ to[1] = __high2half(from);
+}
+
+
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro_aligned(
+ const Eigen::half* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ // Input is guaranteed to be properly aligned.
+ return __ldg(reinterpret_cast<const half2*>(from));
+#else
+ return __halves2half2(*(from+0), *(from+1));
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro_unaligned(
+ const Eigen::half* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ return __halves2half2(__ldg(from+0), __ldg(from+1));
+#else
+ return __halves2half2(*(from+0), *(from+1));
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pgather(const Eigen::half* from,
+ Index stride) {
+ return __halves2half2(from[0*stride], from[1*stride]);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(
+ Eigen::half* to, const half2& from, Index stride) {
+ to[stride*0] = __low2half(from);
+ to[stride*1] = __high2half(from);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst(const half2& a) {
+ return __low2half(a);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pabs(const half2& a) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half result1 = half_impl::raw_uint16_to_half(a1.x & 0x7FFF);
+ half result2 = half_impl::raw_uint16_to_half(a2.x & 0x7FFF);
+ return __halves2half2(result1, result2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ptrue(const half2& /*a*/) {
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
+ return pset1<half2>(true_half);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pzero(const half2& /*a*/) {
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
+ return pset1<half2>(false_half);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<half2,2>& kernel) {
+ __half a1 = __low2half(kernel.packet[0]);
+ __half a2 = __high2half(kernel.packet[0]);
+ __half b1 = __low2half(kernel.packet[1]);
+ __half b2 = __high2half(kernel.packet[1]);
+ kernel.packet[0] = __halves2half2(a1, b1);
+ kernel.packet[1] = __halves2half2(a2, b2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plset(const Eigen::half& a) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __halves2half2(a, __hadd(a, __float2half(1.0f)));
+#else
+ float f = __half2float(a) + 1.0f;
+ return __halves2half2(a, __float2half(f));
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pselect(const half2& mask,
+ const half2& a,
+ const half2& b) {
+ half mask_low = __low2half(mask);
+ half mask_high = __high2half(mask);
+ half result_low = mask_low == half(0) ? __low2half(b) : __low2half(a);
+ half result_high = mask_high == half(0) ? __high2half(b) : __high2half(a);
+ return __halves2half2(result_low, result_high);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_eq(const half2& a,
+ const half2& b) {
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half eq1 = __half2float(a1) == __half2float(b1) ? true_half : false_half;
+ half eq2 = __half2float(a2) == __half2float(b2) ? true_half : false_half;
+ return __halves2half2(eq1, eq2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_lt(const half2& a,
+ const half2& b) {
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half eq1 = __half2float(a1) < __half2float(b1) ? true_half : false_half;
+ half eq2 = __half2float(a2) < __half2float(b2) ? true_half : false_half;
+ return __halves2half2(eq1, eq2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pand(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x & b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x & b2.x);
+ return __halves2half2(result1, result2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 por(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x | b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x | b2.x);
+ return __halves2half2(result1, result2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pxor(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x ^ b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x ^ b2.x);
+ return __halves2half2(result1, result2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pandnot(const half2& a,
+ const half2& b) {
+ half a1 = __low2half(a);
+ half a2 = __high2half(a);
+ half b1 = __low2half(b);
+ half b2 = __high2half(b);
+ half result1 = half_impl::raw_uint16_to_half(a1.x & ~b1.x);
+ half result2 = half_impl::raw_uint16_to_half(a2.x & ~b2.x);
+ return __halves2half2(result1, result2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hadd2(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 + b1;
+ float r2 = a2 + b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psub(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hsub2(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 - b1;
+ float r2 = a2 - b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hneg2(a);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ return __floats2half2_rn(-a1, -a2);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hmul2(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 * b1;
+ float r2 = a2 * b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmadd(const half2& a,
+ const half2& b,
+ const half2& c) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hfma2(a, b, c);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float c1 = __low2float(c);
+ float c2 = __high2float(c);
+ float r1 = a1 * b1 + c1;
+ float r2 = a2 * b2 + c2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __h2div(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 / b1;
+ float r2 = a2 / b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin(const half2& a,
+ const half2& b) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
+ __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
+ return __halves2half2(r1, r2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax(const half2& a,
+ const half2& b) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
+ __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
+ return __halves2half2(r1, r2);
+}
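+// Comparing through float in pmin/pmax above is exact: every binary16 value
+// converts to float without rounding, so the lane-wise result matches a
+// native half comparison (NaN propagation aside).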
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux(const half2& a) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hadd(__low2half(a), __high2half(a));
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ return Eigen::half(__float2half(a1 + a2));
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max(const half2& a) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ __half first = __low2half(a);
+ __half second = __high2half(a);
+ return __hgt(first, second) ? first : second;
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ return a1 > a2 ? __low2half(a) : __high2half(a);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min(const half2& a) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ __half first = __low2half(a);
+ __half second = __high2half(a);
+ return __hlt(first, second) ? first : second;
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ return a1 < a2 ? __low2half(a) : __high2half(a);
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul(const half2& a) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hmul(__low2half(a), __high2half(a));
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ return Eigen::half(__float2half(a1 * a2));
+#endif
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog1p(const half2& a) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float r1 = log1pf(a1);
+ float r2 = log1pf(a2);
+ return __floats2half2_rn(r1, r2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexpm1(const half2& a) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float r1 = expm1f(a1);
+ float r2 = expm1f(a2);
+ return __floats2half2_rn(r1, r2);
+}
+
+#if (EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+half2 plog(const half2& a) {
+ return h2log(a);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+half2 pexp(const half2& a) {
+ return h2exp(a);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+half2 psqrt(const half2& a) {
+ return h2sqrt(a);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+half2 prsqrt(const half2& a) {
+ return h2rsqrt(a);
+}
+
+#else
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog(const half2& a) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float r1 = logf(a1);
+ float r2 = logf(a2);
+ return __floats2half2_rn(r1, r2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexp(const half2& a) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float r1 = expf(a1);
+ float r2 = expf(a2);
+ return __floats2half2_rn(r1, r2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psqrt(const half2& a) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float r1 = sqrtf(a1);
+ float r2 = sqrtf(a2);
+ return __floats2half2_rn(r1, r2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 prsqrt(const half2& a) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float r1 = rsqrtf(a1);
+ float r2 = rsqrtf(a2);
+ return __floats2half2_rn(r1, r2);
+}
+#endif
+} // namespace
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pload<Packet4h2>(const Eigen::half* from) {
+ return *reinterpret_cast<const Packet4h2*>(from);
+}
+
+// unaligned load
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+ploadu<Packet4h2>(const Eigen::half* from) {
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = ploadu(from + 0);
+ p_alias[1] = ploadu(from + 2);
+ p_alias[2] = ploadu(from + 4);
+ p_alias[3] = ploadu(from + 6);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+ploaddup<Packet4h2>(const Eigen::half* from) {
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = ploaddup(from + 0);
+ p_alias[1] = ploaddup(from + 1);
+ p_alias[2] = ploaddup(from + 2);
+ p_alias[3] = ploaddup(from + 3);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<Eigen::half>(
+ Eigen::half* to, const Packet4h2& from) {
+ *reinterpret_cast<Packet4h2*>(to) = from;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(
+ Eigen::half* to, const Packet4h2& from) {
+ const half2* from_alias = reinterpret_cast<const half2*>(&from);
+ pstoreu(to + 0,from_alias[0]);
+ pstoreu(to + 2,from_alias[1]);
+ pstoreu(to + 4,from_alias[2]);
+ pstoreu(to + 6,from_alias[3]);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet4h2
+ploadt_ro<Packet4h2, Aligned>(const Eigen::half* from) {
+#if defined(EIGEN_GPU_HAS_LDG)
+ Packet4h2 r;
+ r = __ldg(reinterpret_cast<const Packet4h2*>(from));
+ return r;
+#else
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ r_alias[0] = ploadt_ro_aligned(from + 0);
+ r_alias[1] = ploadt_ro_aligned(from + 2);
+ r_alias[2] = ploadt_ro_aligned(from + 4);
+ r_alias[3] = ploadt_ro_aligned(from + 6);
+ return r;
+#endif
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet4h2
+ploadt_ro<Packet4h2, Unaligned>(const Eigen::half* from) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ r_alias[0] = ploadt_ro_unaligned(from + 0);
+ r_alias[1] = ploadt_ro_unaligned(from + 2);
+ r_alias[2] = ploadt_ro_unaligned(from + 4);
+ r_alias[3] = ploadt_ro_unaligned(from + 6);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pgather<Eigen::half, Packet4h2>(const Eigen::half* from, Index stride) {
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = __halves2half2(from[0 * stride], from[1 * stride]);
+ p_alias[1] = __halves2half2(from[2 * stride], from[3 * stride]);
+ p_alias[2] = __halves2half2(from[4 * stride], from[5 * stride]);
+ p_alias[3] = __halves2half2(from[6 * stride], from[7 * stride]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h2>(
+ Eigen::half* to, const Packet4h2& from, Index stride) {
+ const half2* from_alias = reinterpret_cast<const half2*>(&from);
+ pscatter(to + stride * 0, from_alias[0], stride);
+ pscatter(to + stride * 2, from_alias[1], stride);
+ pscatter(to + stride * 4, from_alias[2], stride);
+ pscatter(to + stride * 6, from_alias[3], stride);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h2>(
+ const Packet4h2& a) {
+ return pfirst(*(reinterpret_cast<const half2*>(&a)));
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pabs<Packet4h2>(
+ const Packet4h2& a) {
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ p_alias[0] = pabs(a_alias[0]);
+ p_alias[1] = pabs(a_alias[1]);
+ p_alias[2] = pabs(a_alias[2]);
+ p_alias[3] = pabs(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 ptrue<Packet4h2>(
+ const Packet4h2& /*a*/) {
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
+ return pset1<Packet4h2>(true_half);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pzero<Packet4h2>(const Packet4h2& /*a*/) {
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
+ return pset1<Packet4h2>(false_half);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose_double(
+ double* d_row0, double* d_row1, double* d_row2, double* d_row3,
+ double* d_row4, double* d_row5, double* d_row6, double* d_row7) {
+ double d_tmp;
+ d_tmp = d_row0[1];
+ d_row0[1] = d_row4[0];
+ d_row4[0] = d_tmp;
+
+ d_tmp = d_row1[1];
+ d_row1[1] = d_row5[0];
+ d_row5[0] = d_tmp;
+
+ d_tmp = d_row2[1];
+ d_row2[1] = d_row6[0];
+ d_row6[0] = d_tmp;
+
+ d_tmp = d_row3[1];
+ d_row3[1] = d_row7[0];
+ d_row7[0] = d_tmp;
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose_half2(
+ half2* f_row0, half2* f_row1, half2* f_row2, half2* f_row3) {
+ half2 f_tmp;
+ f_tmp = f_row0[1];
+ f_row0[1] = f_row2[0];
+ f_row2[0] = f_tmp;
+
+ f_tmp = f_row1[1];
+ f_row1[1] = f_row3[0];
+ f_row3[0] = f_tmp;
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
+ptranspose_half(half2& f0, half2& f1) {
+ __half a1 = __low2half(f0);
+ __half a2 = __high2half(f0);
+ __half b1 = __low2half(f1);
+ __half b2 = __high2half(f1);
+ f0 = __halves2half2(a1, b1);
+ f1 = __halves2half2(a2, b2);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet4h2,8>& kernel) {
+ double* d_row0 = reinterpret_cast<double*>(&kernel.packet[0]);
+ double* d_row1 = reinterpret_cast<double*>(&kernel.packet[1]);
+ double* d_row2 = reinterpret_cast<double*>(&kernel.packet[2]);
+ double* d_row3 = reinterpret_cast<double*>(&kernel.packet[3]);
+ double* d_row4 = reinterpret_cast<double*>(&kernel.packet[4]);
+ double* d_row5 = reinterpret_cast<double*>(&kernel.packet[5]);
+ double* d_row6 = reinterpret_cast<double*>(&kernel.packet[6]);
+ double* d_row7 = reinterpret_cast<double*>(&kernel.packet[7]);
+ ptranspose_double(d_row0, d_row1, d_row2, d_row3,
+ d_row4, d_row5, d_row6, d_row7);
+
+ half2* f_row0 = reinterpret_cast<half2*>(d_row0);
+ half2* f_row1 = reinterpret_cast<half2*>(d_row1);
+ half2* f_row2 = reinterpret_cast<half2*>(d_row2);
+ half2* f_row3 = reinterpret_cast<half2*>(d_row3);
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
+ ptranspose_half(f_row0[0], f_row1[0]);
+ ptranspose_half(f_row0[1], f_row1[1]);
+ ptranspose_half(f_row2[0], f_row3[0]);
+ ptranspose_half(f_row2[1], f_row3[1]);
+
+ f_row0 = reinterpret_cast<half2*>(d_row0 + 1);
+ f_row1 = reinterpret_cast<half2*>(d_row1 + 1);
+ f_row2 = reinterpret_cast<half2*>(d_row2 + 1);
+ f_row3 = reinterpret_cast<half2*>(d_row3 + 1);
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
+ ptranspose_half(f_row0[0], f_row1[0]);
+ ptranspose_half(f_row0[1], f_row1[1]);
+ ptranspose_half(f_row2[0], f_row3[0]);
+ ptranspose_half(f_row2[1], f_row3[1]);
+
+ f_row0 = reinterpret_cast<half2*>(d_row4);
+ f_row1 = reinterpret_cast<half2*>(d_row5);
+ f_row2 = reinterpret_cast<half2*>(d_row6);
+ f_row3 = reinterpret_cast<half2*>(d_row7);
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
+ ptranspose_half(f_row0[0], f_row1[0]);
+ ptranspose_half(f_row0[1], f_row1[1]);
+ ptranspose_half(f_row2[0], f_row3[0]);
+ ptranspose_half(f_row2[1], f_row3[1]);
+
+ f_row0 = reinterpret_cast<half2*>(d_row4 + 1);
+ f_row1 = reinterpret_cast<half2*>(d_row5 + 1);
+ f_row2 = reinterpret_cast<half2*>(d_row6 + 1);
+ f_row3 = reinterpret_cast<half2*>(d_row7 + 1);
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
+ ptranspose_half(f_row0[0], f_row1[0]);
+ ptranspose_half(f_row0[1], f_row1[1]);
+ ptranspose_half(f_row2[0], f_row3[0]);
+ ptranspose_half(f_row2[1], f_row3[1]);
+}
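+// The 8x8 half transpose above proceeds hierarchically: ptranspose_double
+// swaps the off-diagonal 4x4 (64-bit) blocks, ptranspose_half2 swaps half2
+// pairs within each 4x4 quadrant, and ptranspose_half finishes the 2x2 tiles.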
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+plset<Packet4h2>(const Eigen::half& a) {
+#if defined(EIGEN_HIP_DEVICE_COMPILE)
+
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = __halves2half2(a, __hadd(a, __float2half(1.0f)));
+ p_alias[1] = __halves2half2(__hadd(a, __float2half(2.0f)),
+ __hadd(a, __float2half(3.0f)));
+ p_alias[2] = __halves2half2(__hadd(a, __float2half(4.0f)),
+ __hadd(a, __float2half(5.0f)));
+ p_alias[3] = __halves2half2(__hadd(a, __float2half(6.0f)),
+ __hadd(a, __float2half(7.0f)));
+ return r;
+#elif defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+
+ half2 b = pset1<half2>(a);
+ half2 c;
+ half2 half_offset0 = __halves2half2(__float2half(0.0f),__float2half(2.0f));
+ half2 half_offset1 = __halves2half2(__float2half(4.0f),__float2half(6.0f));
+
+ c = __hadd2(b, half_offset0);
+ r_alias[0] = plset(__low2half(c));
+ r_alias[1] = plset(__high2half(c));
+
+ c = __hadd2(b, half_offset1);
+ r_alias[2] = plset(__low2half(c));
+ r_alias[3] = plset(__high2half(c));
+
+ return r;
+
+#else
+ float f = __half2float(a);
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = __halves2half2(a, __float2half(f + 1.0f));
+ p_alias[1] = __halves2half2(__float2half(f + 2.0f), __float2half(f + 3.0f));
+ p_alias[2] = __halves2half2(__float2half(f + 4.0f), __float2half(f + 5.0f));
+ p_alias[3] = __halves2half2(__float2half(f + 6.0f), __float2half(f + 7.0f));
+ return r;
+#endif
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pselect<Packet4h2>(const Packet4h2& mask, const Packet4h2& a,
+ const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* mask_alias = reinterpret_cast<const half2*>(&mask);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pselect(mask_alias[0], a_alias[0], b_alias[0]);
+ r_alias[1] = pselect(mask_alias[1], a_alias[1], b_alias[1]);
+ r_alias[2] = pselect(mask_alias[2], a_alias[2], b_alias[2]);
+ r_alias[3] = pselect(mask_alias[3], a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pcmp_eq<Packet4h2>(const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pcmp_eq(a_alias[0], b_alias[0]);
+ r_alias[1] = pcmp_eq(a_alias[1], b_alias[1]);
+ r_alias[2] = pcmp_eq(a_alias[2], b_alias[2]);
+ r_alias[3] = pcmp_eq(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pand<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pand(a_alias[0], b_alias[0]);
+ r_alias[1] = pand(a_alias[1], b_alias[1]);
+ r_alias[2] = pand(a_alias[2], b_alias[2]);
+ r_alias[3] = pand(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 por<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = por(a_alias[0], b_alias[0]);
+ r_alias[1] = por(a_alias[1], b_alias[1]);
+ r_alias[2] = por(a_alias[2], b_alias[2]);
+ r_alias[3] = por(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pxor<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pxor(a_alias[0], b_alias[0]);
+ r_alias[1] = pxor(a_alias[1], b_alias[1]);
+ r_alias[2] = pxor(a_alias[2], b_alias[2]);
+ r_alias[3] = pxor(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pandnot<Packet4h2>(const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pandnot(a_alias[0], b_alias[0]);
+ r_alias[1] = pandnot(a_alias[1], b_alias[1]);
+ r_alias[2] = pandnot(a_alias[2], b_alias[2]);
+ r_alias[3] = pandnot(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 padd<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = padd(a_alias[0], b_alias[0]);
+ r_alias[1] = padd(a_alias[1], b_alias[1]);
+ r_alias[2] = padd(a_alias[2], b_alias[2]);
+ r_alias[3] = padd(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 psub<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = psub(a_alias[0], b_alias[0]);
+ r_alias[1] = psub(a_alias[1], b_alias[1]);
+ r_alias[2] = psub(a_alias[2], b_alias[2]);
+ r_alias[3] = psub(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pnegate(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = pnegate(a_alias[0]);
+ r_alias[1] = pnegate(a_alias[1]);
+ r_alias[2] = pnegate(a_alias[2]);
+ r_alias[3] = pnegate(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pconj(const Packet4h2& a) {
+ return a;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmul<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pmul(a_alias[0], b_alias[0]);
+ r_alias[1] = pmul(a_alias[1], b_alias[1]);
+ r_alias[2] = pmul(a_alias[2], b_alias[2]);
+ r_alias[3] = pmul(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmadd<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b, const Packet4h2& c) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ const half2* c_alias = reinterpret_cast<const half2*>(&c);
+ r_alias[0] = pmadd(a_alias[0], b_alias[0], c_alias[0]);
+ r_alias[1] = pmadd(a_alias[1], b_alias[1], c_alias[1]);
+ r_alias[2] = pmadd(a_alias[2], b_alias[2], c_alias[2]);
+ r_alias[3] = pmadd(a_alias[3], b_alias[3], c_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pdiv<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pdiv(a_alias[0], b_alias[0]);
+ r_alias[1] = pdiv(a_alias[1], b_alias[1]);
+ r_alias[2] = pdiv(a_alias[2], b_alias[2]);
+ r_alias[3] = pdiv(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmin<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pmin(a_alias[0], b_alias[0]);
+ r_alias[1] = pmin(a_alias[1], b_alias[1]);
+ r_alias[2] = pmin(a_alias[2], b_alias[2]);
+ r_alias[3] = pmin(a_alias[3], b_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmax<Packet4h2>(
+ const Packet4h2& a, const Packet4h2& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
+ r_alias[0] = pmax(a_alias[0], b_alias[0]);
+ r_alias[1] = pmax(a_alias[1], b_alias[1]);
+ r_alias[2] = pmax(a_alias[2], b_alias[2]);
+ r_alias[3] = pmax(a_alias[3], b_alias[3]);
+ return r;
+}
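+
+// Every Packet4h2 op above follows the same pattern: view the 8-half packet
+// as four half2 sub-packets and apply the corresponding half2 primitive to
+// each pair of lanes. A hypothetical helper (sketch only, not part of this
+// patch) could factor out the repetition for the binary ops:
+//
+//   template <typename Op>
+//   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+//   apply_half2(Op op, const Packet4h2& a, const Packet4h2& b) {
+//     Packet4h2 r;
+//     half2* r_alias = reinterpret_cast<half2*>(&r);
+//     const half2* a_alias = reinterpret_cast<const half2*>(&a);
+//     const half2* b_alias = reinterpret_cast<const half2*>(&b);
+//     for (int i = 0; i < 4; ++i) r_alias[i] = op(a_alias[i], b_alias[i]);
+//     return r;
+//   }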
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux<Packet4h2>(
+ const Packet4h2& a) {
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+
+ return predux(a_alias[0]) + predux(a_alias[1]) +
+ predux(a_alias[2]) + predux(a_alias[3]);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max<Packet4h2>(
+ const Packet4h2& a) {
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ half2 m0 = __halves2half2(predux_max(a_alias[0]),
+ predux_max(a_alias[1]));
+ half2 m1 = __halves2half2(predux_max(a_alias[2]),
+ predux_max(a_alias[3]));
+ __half first = predux_max(m0);
+ __half second = predux_max(m1);
+#if defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
+ return (__hgt(first, second) ? first : second);
+#else
+ float ffirst = __half2float(first);
+ float fsecond = __half2float(second);
+ return (ffirst > fsecond) ? first : second;
+#endif
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min<Packet4h2>(
+ const Packet4h2& a) {
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ half2 m0 = __halves2half2(predux_min(a_alias[0]),
+ predux_min(a_alias[1]));
+ half2 m1 = __halves2half2(predux_min(a_alias[2]),
+ predux_min(a_alias[3]));
+ __half first = predux_min(m0);
+ __half second = predux_min(m1);
+#if defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
+ return (__hlt(first, second) ? first : second);
+#else
+ float ffirst = __half2float(first);
+ float fsecond = __half2float(second);
+ return (ffirst < fsecond) ? first : second;
+#endif
+}
+
+// N.B. multiplying eight half-precision factors together is likely to
+// overflow or underflow.
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet4h2>(
+ const Packet4h2& a) {
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ return predux_mul(pmul(pmul(a_alias[0], a_alias[1]),
+ pmul(a_alias[2], a_alias[3])));
+}
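+
+// Scalar picture of the reductions above, for lanes x0..x7 (sketch):
+//   predux     -> x0 + x1 + ... + x7
+//   predux_max -> max(x0, ..., x7), folded pairwise through half2
+//   predux_min -> min(x0, ..., x7)
+//   predux_mul -> x0 * x1 * ... * x7 (hence the overflow/underflow caveat:
+//                 half has ~3 decimal digits and a maximum of 65504)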
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+plog1p<Packet4h2>(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = plog1p(a_alias[0]);
+ r_alias[1] = plog1p(a_alias[1]);
+ r_alias[2] = plog1p(a_alias[2]);
+ r_alias[3] = plog1p(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+pexpm1<Packet4h2>(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = pexpm1(a_alias[0]);
+ r_alias[1] = pexpm1(a_alias[1]);
+ r_alias[2] = pexpm1(a_alias[2]);
+ r_alias[3] = pexpm1(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 plog<Packet4h2>(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = plog(a_alias[0]);
+ r_alias[1] = plog(a_alias[1]);
+ r_alias[2] = plog(a_alias[2]);
+ r_alias[3] = plog(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pexp<Packet4h2>(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = pexp(a_alias[0]);
+ r_alias[1] = pexp(a_alias[1]);
+ r_alias[2] = pexp(a_alias[2]);
+ r_alias[3] = pexp(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 psqrt<Packet4h2>(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = psqrt(a_alias[0]);
+ r_alias[1] = psqrt(a_alias[1]);
+ r_alias[2] = psqrt(a_alias[2]);
+ r_alias[3] = psqrt(a_alias[3]);
+ return r;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+prsqrt<Packet4h2>(const Packet4h2& a) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ r_alias[0] = prsqrt(a_alias[0]);
+ r_alias[1] = prsqrt(a_alias[1]);
+ r_alias[2] = prsqrt(a_alias[2]);
+ r_alias[3] = prsqrt(a_alias[3]);
+ return r;
+}
+
+// The following specialized padd, pmul, pdiv, pmin and pmax are needed for
+// the implementation of GPU half reduction.
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hadd2(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 + b1;
+ float r2 = a2 + b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __hmul2(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 * b1;
+ float r2 = a2 * b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a,
+ const half2& b) {
+#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+ return __h2div(a, b);
+#else
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ float r1 = a1 / b1;
+ float r2 = a2 / b2;
+ return __floats2half2_rn(r1, r2);
+#endif
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a,
+ const half2& b) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
+ __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
+ return __halves2half2(r1, r2);
+}
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a,
+ const half2& b) {
+ float a1 = __low2float(a);
+ float a2 = __high2float(a);
+ float b1 = __low2float(b);
+ float b2 = __high2float(b);
+ __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
+ __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
+ return __halves2half2(r1, r2);
+}
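+
+// When the device has no native FP16 arithmetic, each half2 op above widens
+// to float, computes there, and narrows back with round-to-nearest. The
+// scalar equivalent of the padd fallback would be (sketch, not part of this
+// patch):
+//
+//   __half padd_scalar(__half x, __half y) {
+//     return __float2half(__half2float(x) + __half2float(y));
+//   }
+//
+// pmin/pmax instead compare in float but return one of the original half
+// operands, so they introduce no rounding.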
+
+#endif // (defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)) && defined(EIGEN_GPU_COMPILE_PHASE)
+
+#undef EIGEN_GPU_HAS_LDG
+#undef EIGEN_CUDA_HAS_FP16_ARITHMETIC
+#undef EIGEN_GPU_HAS_FP16_ARITHMETIC
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+
+#endif // EIGEN_PACKET_MATH_GPU_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/TypeCasting.h
new file mode 100644
index 000000000..c8195bb2b
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/GPU/TypeCasting.h
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_GPU_H
+#define EIGEN_TYPE_CASTING_GPU_H
+
+namespace Eigen {
+
+namespace internal {
+
+#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+
+template <>
+struct type_casting_traits<Eigen::half, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 2
+ };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<half2, float4>(const half2& a, const half2& b) {
+ float2 r1 = __half22float2(a);
+ float2 r2 = __half22float2(b);
+ return make_float4(r1.x, r1.y, r2.x, r2.y);
+}
+
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pcast<float4, Packet4h2>(const float4& a, const float4& b) {
+ Packet4h2 r;
+ half2* r_alias = reinterpret_cast<half2*>(&r);
+ r_alias[0] = __floats2half2_rn(a.x, a.y);
+ r_alias[1] = __floats2half2_rn(a.z, a.w);
+ r_alias[2] = __floats2half2_rn(b.x, b.y);
+ r_alias[3] = __floats2half2_rn(b.z, b.w);
+ return r;
+}
+
+template <>
+struct type_casting_traits<float, Eigen::half> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 2,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<Packet4h2, float4>(const Packet4h2& a) {
+ // Simply discard the second half of the input: a float4 only has room for
+ // the first four of the eight halves.
+ float4 r;
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
+ float2 r1 = __half22float2(a_alias[0]);
+ float2 r2 = __half22float2(a_alias[1]);
+ r.x = r1.x;
+ r.y = r1.y;
+ r.z = r2.x;
+ r.w = r2.y;
+ return r;
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(const float4& a) {
+ // Simply discard the second half of the input
+ return __floats2half2_rn(a.x, a.y);
+}
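+
+// The SrcCoeffRatio/TgtCoeffRatio pairs above describe the packet arithmetic
+// of these casts: a float4 carries as many coefficients as two half2's, so
+// pcast<half2, float4> consumes two half2 inputs per float4 produced, and
+// pcast<float4, Packet4h2> consumes two float4's per eight-half packet. The
+// single-argument overloads keep only as many coefficients as fit in the
+// target packet and drop the rest.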
+
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_GPU_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/HIP/hcc/math_constants.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/HIP/hcc/math_constants.h
new file mode 100644
index 000000000..25375a0a4
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/HIP/hcc/math_constants.h
@@ -0,0 +1,23 @@
+/*
+ * math_constants.h -
+ * HIP equivalent of the CUDA header of the same name
+ */
+
+#ifndef __MATH_CONSTANTS_H__
+#define __MATH_CONSTANTS_H__
+
+/* single precision constants */
+
+#define HIPRT_INF_F __int_as_float(0x7f800000)
+#define HIPRT_NAN_F __int_as_float(0x7fffffff)
+#define HIPRT_MIN_DENORM_F __int_as_float(0x00000001)
+#define HIPRT_MAX_NORMAL_F __int_as_float(0x7f7fffff)
+#define HIPRT_NEG_ZERO_F __int_as_float(0x80000000)
+#define HIPRT_ZERO_F 0.0f
+#define HIPRT_ONE_F 1.0f
+
+/* double precision constants */
+#define HIPRT_INF __hiloint2double(0x7ff00000, 0x00000000)
+#define HIPRT_NAN __hiloint2double(0xfff80000, 0x00000000)
+
+#endif
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/Complex.h
new file mode 100644
index 000000000..53dacfa43
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/Complex.h
@@ -0,0 +1,648 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Wave Computing, Inc.
+// Written by:
+// Chris Larsen
+// Alexey Frunze (afrunze@wavecomp.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_MSA_H
+#define EIGEN_COMPLEX_MSA_H
+
+#include <iostream>
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- float ----------
+struct Packet2cf {
+ EIGEN_STRONG_INLINE Packet2cf() {
+ }
+ EIGEN_STRONG_INLINE explicit Packet2cf(const std::complex<float>& a,
+ const std::complex<float>& b) {
+ Packet4f t = { std::real(a), std::imag(a), std::real(b), std::imag(b) };
+ v = t;
+ }
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {
+ }
+ EIGEN_STRONG_INLINE Packet2cf(const Packet2cf& a) : v(a.v) {
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator=(const Packet2cf& b) {
+ v = b.v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf conjugate(void) const {
+ return Packet2cf((Packet4f)__builtin_msa_bnegi_d((v2u64)v, 63));
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) {
+ Packet4f v1, v2;
+
+ // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
+ v1 = (Packet4f)__builtin_msa_ilvev_w((v4i32)v, (v4i32)v);
+ // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
+ v2 = (Packet4f)__builtin_msa_ilvod_w((v4i32)v, (v4i32)v);
+ // Multiply the real a with b
+ v1 = pmul(v1, b.v);
+ // Multiply the imag a with b
+ v2 = pmul(v2, b.v);
+ // Conjugate v2
+ v2 = Packet2cf(v2).conjugate().v;
+ // Swap real/imag elements in v2.
+ v2 = (Packet4f)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(1, 0, 3, 2));
+ // Add and return the result
+ v = padd(v1, v2);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const {
+ return Packet2cf(*this) *= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const {
+ return Packet2cf(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const {
+ return Packet2cf(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator/=(const Packet2cf& b) {
+ *this *= b.conjugate();
+ Packet4f s = pmul<Packet4f>(b.v, b.v);
+ s = padd(s, (Packet4f)__builtin_msa_shf_w((v4i32)s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ v = pdiv(v, s);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator/(const Packet2cf& b) const {
+ return Packet2cf(*this) /= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(void) const {
+ return Packet2cf(pnegate(v));
+ }
+
+ Packet4f v;
+};
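+
+// The operator*= above vectorizes the textbook complex product. With
+// a = ar + i*ai and b = br + i*bi:
+//   v1 = (ar, ar) * (br, bi)  = (ar*br, ar*bi)
+//   v2 = (ai, ai) * (br, bi)  = (ai*br, ai*bi)
+//   swap(conj(v2))            = (-ai*bi, ai*br)
+//   v1 + swap(conj(v2))       = (ar*br - ai*bi, ar*bi + ai*br) = a*b
+// operator/= reduces division to this product via a/b = a*conj(b) / |b|^2,
+// where |b|^2 = br^2 + bi^2 is built by summing b*b with its swapped copy.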
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2cf& value) {
+ os << "[ (" << value.v[0] << ", " << value.v[1]
+ << "i),"
+ " ("
+ << value.v[2] << ", " << value.v[3] << "i) ]";
+ return os;
+}
+
+template <>
+struct packet_traits<std::complex<float> > : default_packet_traits {
+ typedef Packet2cf type;
+ typedef Packet2cf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet2cf> {
+ typedef std::complex<float> type;
+ enum { size = 2, alignment = Aligned16, vectorizable = true,
+        masked_load_available = false, masked_store_available = false };
+ typedef Packet2cf half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from) {
+ EIGEN_MSA_DEBUG;
+
+ float f0 = from.real(), f1 = from.imag();
+ Packet4f v0 = { f0, f0, f0, f0 };
+ Packet4f v1 = { f1, f1, f1, f1 };
+ return Packet2cf((Packet4f)__builtin_msa_ilvr_w((Packet4i)v1, (Packet4i)v0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a + b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a - b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return -a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a.conjugate();
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a * b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(pand(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(por(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(pxor(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(pandnot(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) {
+ EIGEN_MSA_DEBUG;
+
+ return pset1<Packet2cf>(*from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<std::complex<float> >(std::complex<float>* to,
+ const Packet2cf& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE pstore<float>((float*)to, from.v);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to,
+ const Packet2cf& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE pstoreu<float>((float*)to, from.v);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(
+ const std::complex<float>* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf(from[0 * stride], from[1 * stride]);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to,
+ const Packet2cf& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = std::complex<float>(from.v[0], from.v[1]);
+ to += stride;
+ *to = std::complex<float>(from.v[2], from.v[3]);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float>* addr) {
+ EIGEN_MSA_DEBUG;
+
+ prefetch(reinterpret_cast<const float*>(addr));
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return std::complex<float>(a.v[0], a.v[1]);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f value = (Packet4f)preverse((Packet2d)a.v);
+ value += a.v;
+ return std::complex<float>(value[0], value[1]);
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a) {
+ EIGEN_MSA_DEBUG;
+
+ return std::complex<float>((a.v[0] * a.v[2]) - (a.v[1] * a.v[3]),
+ (a.v[0] * a.v[3]) + (a.v[1] * a.v[2]));
+}
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf, Packet4f)
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a / b;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet2cf, 2>& value) {
+ os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2cf, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f tmp =
+ (Packet4f)__builtin_msa_ilvl_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v);
+ kernel.packet[0].v =
+ (Packet4f)__builtin_msa_ilvr_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v);
+ kernel.packet[1].v = tmp;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket,
+ const Packet2cf& elsePacket) {
+ return (Packet2cf)(Packet4f)pblend<Packet2d>(ifPacket, (Packet2d)thenPacket.v,
+ (Packet2d)elsePacket.v);
+}
+
+//---------- double ----------
+
+struct Packet1cd {
+ EIGEN_STRONG_INLINE Packet1cd() {
+ }
+ EIGEN_STRONG_INLINE explicit Packet1cd(const std::complex<double>& a) {
+ v[0] = std::real(a);
+ v[1] = std::imag(a);
+ }
+ EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {
+ }
+ EIGEN_STRONG_INLINE Packet1cd(const Packet1cd& a) : v(a.v) {
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator=(const Packet1cd& b) {
+ v = b.v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd conjugate(void) const {
+ static const v2u64 p2ul_CONJ_XOR = { 0x0, 0x8000000000000000 };
+ return (Packet1cd)pxor(v, (Packet2d)p2ul_CONJ_XOR);
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) {
+ Packet2d v1, v2;
+
+ // Get the real values of a | a1_re | a1_re
+ v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)v, (v2i64)v);
+ // Get the imag values of a | a1_im | a1_im
+ v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)v, (v2i64)v);
+ // Multiply the real a with b
+ v1 = pmul(v1, b.v);
+ // Multiply the imag a with b
+ v2 = pmul(v2, b.v);
+ // Conjugate v2
+ v2 = Packet1cd(v2).conjugate().v;
+ // Swap real/imag elements in v2.
+ v2 = (Packet2d)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ // Add and return the result
+ v = padd(v1, v2);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const {
+ return Packet1cd(*this) *= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const {
+ return Packet1cd(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const {
+ return Packet1cd(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator/=(const Packet1cd& b) {
+ *this *= b.conjugate();
+ Packet2d s = pmul<Packet2d>(b.v, b.v);
+ s = padd(s, preverse<Packet2d>(s));
+ v = pdiv(v, s);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator/(const Packet1cd& b) const {
+ return Packet1cd(*this) /= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(void) const {
+ return Packet1cd(pnegate(v));
+ }
+
+ Packet2d v;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const Packet1cd& value) {
+ os << "[ (" << value.v[0] << ", " << value.v[1] << "i) ]";
+ return os;
+}
+
+template <>
+struct packet_traits<std::complex<double> > : default_packet_traits {
+ typedef Packet1cd type;
+ typedef Packet1cd half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 1,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template <>
+struct unpacket_traits<Packet1cd> {
+ typedef std::complex<double> type;
+ enum { size = 1, alignment = Aligned16, vectorizable = true,
+        masked_load_available = false, masked_store_available = false };
+ typedef Packet1cd half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(from);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a + b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a - b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return -a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a.conjugate();
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a * b;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(pand(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(por(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(pxor(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(pandnot(a.v, b.v));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) {
+ EIGEN_MSA_DEBUG;
+
+ return pset1<Packet1cd>(*from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<std::complex<double> >(std::complex<double>* to,
+ const Packet1cd& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE pstore<double>((double*)to, from.v);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double>* to,
+ const Packet1cd& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE pstoreu<double>((double*)to, from.v);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double>* addr) {
+ EIGEN_MSA_DEBUG;
+
+ prefetch(reinterpret_cast<const double*>(addr));
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(
+ const std::complex<double>* from, Index stride __attribute__((unused))) {
+ EIGEN_MSA_DEBUG;
+
+ Packet1cd res;
+ res.v[0] = std::real(from[0]);
+ res.v[1] = std::imag(from[0]);
+ return res;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to,
+ const Packet1cd& from,
+ Index stride
+ __attribute__((unused))) {
+ EIGEN_MSA_DEBUG;
+
+ pstore(to, from);
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return std::complex<double>(a.v[0], a.v[1]);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return pfirst(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) {
+ EIGEN_MSA_DEBUG;
+
+ return pfirst(a);
+}
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd, Packet2d)
+
+template <>
+EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
+ EIGEN_MSA_DEBUG;
+
+ return a / b;
+}
+
+EIGEN_STRONG_INLINE Packet1cd pcplxflip /*<Packet1cd>*/ (const Packet1cd& x) {
+ EIGEN_MSA_DEBUG;
+
+ return Packet1cd(preverse(Packet2d(x.v)));
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet1cd, 2>& value) {
+ os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]";
+ return os;
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d v1, v2;
+
+ // Interleave the even-indexed (real) doubles of the two packets.
+ v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v);
+ // Interleave the odd-indexed (imaginary) doubles of the two packets.
+ v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v);
+
+ kernel.packet[0].v = v1;
+ kernel.packet[1].v = v2;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_MSA_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/MathFunctions.h
new file mode 100644
index 000000000..f5181b90e
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/MathFunctions.h
@@ -0,0 +1,387 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
+// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Copyright (C) 2018 Wave Computing, Inc.
+// Written by:
+// Chris Larsen
+// Alexey Frunze (afrunze@wavecomp.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+/* The tanh function of this file is an adaptation of
+ * template<typename T> T generic_fast_tanh_float(const T&)
+ * from MathFunctionsImpl.h.
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_MSA_H
+#define EIGEN_MATH_FUNCTIONS_MSA_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+plog<Packet4f>(const Packet4f& _x) {
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292e-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, -1.1514610310e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, -1.2420140846e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, +1.4249322787e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, -1.6668057665e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, +2.0000714765e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, -2.4999993993e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, +3.3333331174e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
+
+ // Convert negative arguments into NANs (quiet negative NANs, to be specific).
+ Packet4f zero = (Packet4f)__builtin_msa_ldi_w(0);
+ Packet4i neg_mask = __builtin_msa_fclt_w(_x, zero);
+ Packet4i zero_mask = __builtin_msa_fceq_w(_x, zero);
+ Packet4f non_neg_x_or_nan = padd(_x, (Packet4f)neg_mask); // Add 0.0 or NAN.
+ Packet4f x = non_neg_x_or_nan;
+
+ // Extract exponent from x = mantissa * 2**exponent, where 1.0 <= mantissa < 2.0.
+ // N.B. the exponent is one less than what frexpf() would return.
+ Packet4i e_int = __builtin_msa_ftint_s_w(__builtin_msa_flog2_w(x));
+ // Multiply x by 2**(-exponent-1) to get 0.5 <= x < 1.0 as from frexpf().
+ x = __builtin_msa_fexp2_w(x, (Packet4i)__builtin_msa_nori_b((v16u8)e_int, 0));
+
+ /*
+ if (x < SQRTHF) {
+ x = x + x - 1.0;
+ } else {
+ e += 1;
+ x = x - 1.0;
+ }
+ */
+ Packet4f xx = padd(x, x);
+ Packet4i ge_mask = __builtin_msa_fcle_w(p4f_cephes_SQRTHF, x);
+ e_int = psub(e_int, ge_mask);
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)ge_mask, (v16u8)xx, (v16u8)x);
+ x = psub(x, p4f_1);
+ Packet4f e = __builtin_msa_ffint_s_w(e_int);
+
+ Packet4f x2 = pmul(x, x);
+ Packet4f x3 = pmul(x2, x);
+
+ Packet4f y, y1, y2;
+ y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
+ y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
+ y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
+ y = pmadd(y, x, p4f_cephes_log_p2);
+ y1 = pmadd(y1, x, p4f_cephes_log_p5);
+ y2 = pmadd(y2, x, p4f_cephes_log_p8);
+ y = pmadd(y, x3, y1);
+ y = pmadd(y, x3, y2);
+ y = pmul(y, x3);
+
+ y = pmadd(e, p4f_cephes_log_q1, y);
+ x = __builtin_msa_fmsub_w(x, x2, p4f_half);
+ x = padd(x, y);
+ x = pmadd(e, p4f_cephes_log_q2, x);
+
+ // x is now the logarithm result candidate. We still need to handle the
+ // extreme arguments of zero and positive infinity, though.
+ // N.B. if the argument is +INFINITY, x is NAN because the polynomial terms
+ // contain infinities of both signs (see the coefficients and code above).
+ // INFINITY - INFINITY is NAN.
+
+ // If the argument is +INFINITY, make it the new result candidate.
+ // To achieve that we choose the smaller of the result candidate and the
+ // argument.
+ // This is correct for all finite pairs of values (the logarithm is smaller
+ // than the argument).
+ // This is also correct in the special case when the argument is +INFINITY
+ // and the result candidate is NAN. This is because the fmin.df instruction
+ // prefers non-NANs to NANs.
+ x = __builtin_msa_fmin_w(x, non_neg_x_or_nan);
+
+ // If the argument is zero (including -0.0), the result becomes -INFINITY.
+ Packet4i neg_infs = __builtin_msa_slli_w(zero_mask, 23);
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)zero_mask, (v16u8)x, (v16u8)neg_infs);
+
+ return x;
+}
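+
+// Summary of the Cephes-style algorithm above: write x = m * 2^e with
+// m in [0.5, 1), so that log(x) = e*log(2) + log(m). log(m) is a degree-8
+// polynomial in (m - 1), and log(2) is applied as the split constant
+// q2 + q1 = 0.693359375 - 2.12194440e-4 so the large term e*log(2) does not
+// swamp the low-order bits of the polynomial part.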
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+pexp<Packet4f>(const Packet4f& _x) {
+ // Limiting single-precision pexp's argument to [-128, +128] lets pexp
+ // reach 0 and INFINITY naturally.
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -128.0f);
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, +128.0f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894e-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
+
+ Packet4f x = _x;
+
+ // Clamp x.
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(x, p4f_exp_lo), (v16u8)x,
+ (v16u8)p4f_exp_lo);
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_exp_hi, x), (v16u8)x,
+ (v16u8)p4f_exp_hi);
+
+ // Round to nearest integer by adding 0.5 (with x's sign) and truncating.
+ Packet4f x2_add = (Packet4f)__builtin_msa_binsli_w((v4u32)p4f_half, (v4u32)x, 0);
+ Packet4f x2 = pmadd(x, p4f_cephes_LOG2EF, x2_add);
+ Packet4i x2_int = __builtin_msa_ftrunc_s_w(x2);
+ Packet4f x2_int_f = __builtin_msa_ffint_s_w(x2_int);
+
+ x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C1);
+ x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C2);
+
+ Packet4f z = pmul(x, x);
+
+ Packet4f y = p4f_cephes_exp_p0;
+ y = pmadd(y, x, p4f_cephes_exp_p1);
+ y = pmadd(y, x, p4f_cephes_exp_p2);
+ y = pmadd(y, x, p4f_cephes_exp_p3);
+ y = pmadd(y, x, p4f_cephes_exp_p4);
+ y = pmadd(y, x, p4f_cephes_exp_p5);
+ y = pmadd(y, z, x);
+ y = padd(y, p4f_1);
+
+ // y *= 2**exponent.
+ y = __builtin_msa_fexp2_w(y, x2_int);
+
+ return y;
+}
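+
+// Summary: pexp computes e^x = 2^n * e^r with n = round(x * log2(e)) and
+// r = x - n*log(2), where log(2) is again split into C1 + C2 for accuracy.
+// e^r is approximated by a degree-5 polynomial on the reduced range, and the
+// final scaling by 2^n is a single fexp2 instruction.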
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+ptanh<Packet4f>(const Packet4f& _x) {
+ static _EIGEN_DECLARE_CONST_Packet4f(tanh_tiny, 1e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(tanh_hi, 9.0f);
+ // The monomial coefficients of the numerator polynomial (odd).
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_1, 4.89352455891786e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_3, 6.37261928875436e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_5, 1.48572235717979e-5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_7, 5.12229709037114e-8f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_9, -8.60467152213735e-11f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_11, 2.00018790482477e-13f);
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_13, -2.76076847742355e-16f);
+ // The monomial coefficients of the denominator polynomial (even).
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_0, 4.89352518554385e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_2, 2.26843463243900e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_4, 1.18534705686654e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_6, 1.19825839466702e-6f);
+
+ Packet4f x = pabs(_x);
+ Packet4i tiny_mask = __builtin_msa_fclt_w(x, p4f_tanh_tiny);
+
+ // Clamp the inputs to the range [-9, 9] since anything outside
+ // this range is -/+1.0f in single-precision.
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_tanh_hi, x), (v16u8)x,
+ (v16u8)p4f_tanh_hi);
+
+ // Since the polynomials are odd/even, we need x**2.
+ Packet4f x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial p.
+ Packet4f p = pmadd(x2, p4f_alpha_13, p4f_alpha_11);
+ p = pmadd(x2, p, p4f_alpha_9);
+ p = pmadd(x2, p, p4f_alpha_7);
+ p = pmadd(x2, p, p4f_alpha_5);
+ p = pmadd(x2, p, p4f_alpha_3);
+ p = pmadd(x2, p, p4f_alpha_1);
+ p = pmul(x, p);
+
+ // Evaluate the denominator polynomial q.
+ Packet4f q = pmadd(x2, p4f_beta_6, p4f_beta_4);
+ q = pmadd(x2, q, p4f_beta_2);
+ q = pmadd(x2, q, p4f_beta_0);
+
+ // Divide the numerator by the denominator.
+ p = pdiv(p, q);
+
+ // Reinstate the sign.
+ p = (Packet4f)__builtin_msa_binsli_w((v4u32)p, (v4u32)_x, 0);
+
+ // When the argument is very small in magnitude it's more accurate to just return it.
+ p = (Packet4f)__builtin_msa_bsel_v((v16u8)tiny_mask, (v16u8)p, (v16u8)_x);
+
+ return p;
+}
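+
+// Summary: ptanh evaluates the odd/even rational approximation
+//   tanh(x) ~= x * P(x^2) / Q(x^2)
+// on |x| <= 9 (in single precision tanh saturates to +/-1 beyond that),
+// re-attaches the sign of the argument with a bit-insert, and returns the
+// argument unchanged for |x| < 1e-4, where tanh(x) ~= x to float accuracy.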
+
+template <bool sine>
+Packet4f psincos_inner_msa_float(const Packet4f& _x) {
+ static _EIGEN_DECLARE_CONST_Packet4f(sincos_max_arg, 13176795.0f); // Approx. (2**24) / (4/Pi).
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1, -0.78515625f);
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611e-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948e-5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765e-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827e-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4/Pi.
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
+
+ Packet4f x = pabs(_x);
+
+ // Translate infinite arguments into NANs.
+ Packet4f zero_or_nan_if_inf = psub(_x, _x);
+ x = padd(x, zero_or_nan_if_inf);
+ // Prevent sin/cos from generating values larger than 1.0 in magnitude
+ // for very large arguments by setting x to 0.0.
+ Packet4i small_or_nan_mask = __builtin_msa_fcult_w(x, p4f_sincos_max_arg);
+ x = pand(x, (Packet4f)small_or_nan_mask);
+
+ // Scale x by 4/Pi to find x's octant.
+ Packet4f y = pmul(x, p4f_cephes_FOPI);
+ // Get the octant. We'll reduce x by this number of octants, or by one more for odd octants.
+ Packet4i y_int = __builtin_msa_ftrunc_s_w(y);
+ // x's from even-numbered octants will translate to octant 0: [0, +Pi/4].
+ // x's from odd-numbered octants will translate to octant -1: [-Pi/4, 0].
+ // Adjustment for odd-numbered octants: octant = (octant + 1) & (~1).
+ Packet4i y_int1 = __builtin_msa_addvi_w(y_int, 1);
+ Packet4i y_int2 = (Packet4i)__builtin_msa_bclri_w((Packet4ui)y_int1, 0); // bclri = bit-clear
+ y = __builtin_msa_ffint_s_w(y_int2);
+
+ // Compute the sign to apply to the polynomial.
+ Packet4i sign_mask = sine ? pxor(__builtin_msa_slli_w(y_int1, 29), (Packet4i)_x)
+ : __builtin_msa_slli_w(__builtin_msa_addvi_w(y_int, 3), 29);
+
+ // Get the polynomial selection mask.
+ // We'll calculate both (sin and cos) polynomials and then select from the two.
+ Packet4i poly_mask = __builtin_msa_ceqi_w(__builtin_msa_slli_w(y_int2, 30), 0);
+
+ // Reduce x by y octants to get: -Pi/4 <= x <= +Pi/4.
+ // The magic pass: "Extended precision modular arithmetic"
+ // x = ((x - y * DP1) - y * DP2) - y * DP3
+ Packet4f tmp1 = pmul(y, p4f_minus_cephes_DP1);
+ Packet4f tmp2 = pmul(y, p4f_minus_cephes_DP2);
+ Packet4f tmp3 = pmul(y, p4f_minus_cephes_DP3);
+ x = padd(x, tmp1);
+ x = padd(x, tmp2);
+ x = padd(x, tmp3);
+
+ // Evaluate the cos(x) polynomial.
+ y = p4f_coscof_p0;
+ Packet4f z = pmul(x, x);
+ y = pmadd(y, z, p4f_coscof_p1);
+ y = pmadd(y, z, p4f_coscof_p2);
+ y = pmul(y, z);
+ y = pmul(y, z);
+ y = __builtin_msa_fmsub_w(y, z, p4f_half);
+ y = padd(y, p4f_1);
+
+ // Evaluate the sin(x) polynomial.
+ Packet4f y2 = p4f_sincof_p0;
+ y2 = pmadd(y2, z, p4f_sincof_p1);
+ y2 = pmadd(y2, z, p4f_sincof_p2);
+ y2 = pmul(y2, z);
+ y2 = pmadd(y2, x, x);
+
+ // Select the correct result from the two polynomials.
+ y = sine ? (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y, (v16u8)y2)
+ : (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y2, (v16u8)y);
+
+ // Update the sign.
+ sign_mask = pxor(sign_mask, (Packet4i)y);
+ y = (Packet4f)__builtin_msa_binsli_w((v4u32)y, (v4u32)sign_mask, 0); // binsli = bit-insert-left
+ return y;
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+psin<Packet4f>(const Packet4f& x) {
+ return psincos_inner_msa_float</* sine */ true>(x);
+}
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+pcos<Packet4f>(const Packet4f& x) {
+ return psincos_inner_msa_float</* sine */ false>(x);
+}
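+
+// Summary of psincos_inner_msa_float: the argument is reduced to
+// [-Pi/4, +Pi/4] by subtracting a whole number of Pi/4 octants; the
+// three-constant DP1/DP2/DP3 subtraction is the "extended precision modular
+// arithmetic" pass that keeps the reduction accurate for large inputs. Both
+// the sin and cos polynomials are then evaluated on the reduced argument,
+// and the octant number selects which polynomial and which sign apply.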
+
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d
+pexp<Packet2d>(const Packet2d& _x) {
+ // Limiting double-precision pexp's argument to [-1024, +1024] lets pexp
+ // reach 0 and INFINITY naturally.
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -1024.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, +1024.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+ static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+ static _EIGEN_DECLARE_CONST_Packet2d(1, 1.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(2, 2.0);
+
+ Packet2d x = _x;
+
+ // Clamp x.
+ x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(x, p2d_exp_lo), (v16u8)x,
+ (v16u8)p2d_exp_lo);
+ x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(p2d_exp_hi, x), (v16u8)x,
+ (v16u8)p2d_exp_hi);
+
+ // Round to nearest integer by adding 0.5 (with x's sign) and truncating.
+ Packet2d x2_add = (Packet2d)__builtin_msa_binsli_d((v2u64)p2d_half, (v2u64)x, 0);
+ Packet2d x2 = pmadd(x, p2d_cephes_LOG2EF, x2_add);
+ Packet2l x2_long = __builtin_msa_ftrunc_s_d(x2);
+ Packet2d x2_long_d = __builtin_msa_ffint_s_d(x2_long);
+
+ x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C1);
+ x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C2);
+
+ x2 = pmul(x, x);
+
+ Packet2d px = p2d_cephes_exp_p0;
+ px = pmadd(px, x2, p2d_cephes_exp_p1);
+ px = pmadd(px, x2, p2d_cephes_exp_p2);
+ px = pmul(px, x);
+
+ Packet2d qx = p2d_cephes_exp_q0;
+ qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+ qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+ qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+ x = pdiv(px, psub(qx, px));
+ x = pmadd(p2d_2, x, p2d_1);
+
+ // x *= 2**exponent.
+ x = __builtin_msa_fexp2_d(x, x2_long);
+
+ return x;
+}
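+
+// Summary: the double-precision pexp uses the same 2^n * e^r split as the
+// float version, but approximates e^r with a Pade-style rational: with
+// p = r*P(r^2) and q = Q(r^2), the tail computes
+//   e^r ~= 1 + 2*p / (q - p) = (q + p) / (q - p)
+// via the pdiv/pmadd pair, before the final scaling by 2^n.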
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_MSA_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/PacketMath.h
new file mode 100644
index 000000000..afe8f3375
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/MSA/PacketMath.h
@@ -0,0 +1,1233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Wave Computing, Inc.
+// Written by:
+// Chris Larsen
+// Alexey Frunze (afrunze@wavecomp.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_MSA_H
+#define EIGEN_PACKET_MATH_MSA_H
+
+#include <iostream>
+#include <string>
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#endif
+
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
+#endif
+
+#if 0
+#define EIGEN_MSA_DEBUG \
+ static bool firstTime = true; \
+ do { \
+ if (firstTime) { \
+ std::cout << __FILE__ << ':' << __LINE__ << ':' << __FUNCTION__ << std::endl; \
+ firstTime = false; \
+ } \
+ } while (0)
+#else
+#define EIGEN_MSA_DEBUG
+#endif
+
+#define EIGEN_MSA_SHF_I8(a, b, c, d) (((d) << 6) | ((c) << 4) | ((b) << 2) | (a))
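+
+// The shf.w immediate packs four 2-bit source-lane indices, least-significant
+// pair first. Example: EIGEN_MSA_SHF_I8(1, 0, 3, 2) encodes
+// (2 << 6) | (3 << 4) | (0 << 2) | 1 = 0xB1, which swaps 32-bit lanes
+// pairwise: (w0, w1, w2, w3) -> (w1, w0, w3, w2).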
+
+typedef v4f32 Packet4f;
+typedef v4i32 Packet4i;
+typedef v4u32 Packet4ui;
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME, X) const Packet4f p4f_##NAME = { X, X, X, X }
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME, X) const Packet4i p4i_##NAME = { X, X, X, X }
+#define _EIGEN_DECLARE_CONST_Packet4ui(NAME, X) const Packet4ui p4ui_##NAME = { X, X, X, X }
+
+inline std::ostream& operator<<(std::ostream& os, const Packet4f& value) {
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet4i& value) {
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet4ui& value) {
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
+ return os;
+}
+
+template <>
+struct packet_traits<float> : default_packet_traits {
+ typedef Packet4f type;
+ typedef Packet4f half; // Packet2f intrinsics not implemented yet
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 0, // Packet2f intrinsics not implemented yet
+ // FIXME check the Has*
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct packet_traits<int32_t> : default_packet_traits {
+ typedef Packet4i type;
+ typedef Packet4i half; // Packet2i intrinsics not implemented yet
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 0, // Packet2i intrinsics not implemented yet
+ // FIXME check the Has*
+ HasDiv = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet4f> {
+ typedef float type;
+ enum { size = 4, alignment = Aligned16, vectorizable = true,
+        masked_load_available = false, masked_store_available = false };
+ typedef Packet4f half;
+};
+
+template <>
+struct unpacket_traits<Packet4i> {
+ typedef int32_t type;
+ enum { size = 4, alignment = Aligned16, vectorizable = true,
+        masked_load_available = false, masked_store_available = false };
+ typedef Packet4i half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f v = { from, from, from, from };
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fill_w(from);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ float f = *from;
+ Packet4f v = { f, f, f, f };
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pload1<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fill_w(*from);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fadd_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_addv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) {
+ EIGEN_MSA_DEBUG;
+
+ static const Packet4f countdown = { 0.0f, 1.0f, 2.0f, 3.0f };
+ return padd(pset1<Packet4f>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a) {
+ EIGEN_MSA_DEBUG;
+
+ static const Packet4i countdown = { 0, 1, 2, 3 };
+ return padd(pset1<Packet4i>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsub_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_subv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_bnegi_w((v4u32)a, 31);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_addvi_w((v4i32)__builtin_msa_nori_b((v16u8)a, 0), 1);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmul_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_mulv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fdiv_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_div_s_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmadd_w(c, a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) {
+ EIGEN_MSA_DEBUG;
+
+ // Use "asm" construct to avoid __builtin_msa_maddv_w GNU C bug.
+ Packet4i value = c;
+ __asm__("maddv.w %w[value], %w[a], %w[b]\n"
+ // Outputs
+ : [value] "+f"(value)
+ // Inputs
+ : [a] "f"(a), [b] "f"(b));
+ return value;
+}
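+
+// For reference, a minimal sketch of what the asm above computes per lane
+// (the semantics of the __builtin_msa_maddv_w intrinsic it works around):
+//   for (int i = 0; i < 4; ++i)
+//     value[i] = c[i] + a[i] * b[i];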
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_and_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4i)__builtin_msa_and_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_or_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4i)__builtin_msa_or_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4i)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+ return pand(a, (Packet4f)__builtin_msa_xori_b((v16u8)b, 255));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return pand(a, (Packet4i)__builtin_msa_xori_b((v16u8)b, 255));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmin_w(a, b);
+#else
+ // This prefers NaNs to numbers.
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a);
+ Packet4i aMinOrNaN = por(__builtin_msa_fclt_w(a, b), aNaN);
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
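+
+// Illustration of the two NaN policies above (a sketch, not normative):
+//   pmin({1.0f, NaN, ...}, {2.0f, 3.0f, ...})
+//     with EIGEN_FAST_MATH: fmin.w returns {1.0f, 3.0f, ...} -- the number wins
+//     otherwise: the fcun/bsel path returns {1.0f, NaN, ...} -- the NaN wins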
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_min_s_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmax_w(a, b);
+#else
+ // This prefers NaNs to numbers.
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a);
+ Packet4i aMaxOrNaN = por(__builtin_msa_fclt_w(b, a), aNaN);
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_max_s_w(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast<float*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return __builtin_msa_ld_w(const_cast<int32_t*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast<float*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4i)__builtin_msa_ld_w(const_cast<int32_t*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from) {
+ EIGEN_MSA_DEBUG;
+
+ float f0 = from[0], f1 = from[1];
+ Packet4f v0 = { f0, f0, f0, f0 };
+ Packet4f v1 = { f1, f1, f1, f1 };
+ return (Packet4f)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from) {
+ EIGEN_MSA_DEBUG;
+
+ int32_t i0 = from[0], i1 = from[1];
+ Packet4i v0 = { i0, i0, i0, i0 };
+ Packet4i v1 = { i1, i1, i1, i1 };
+ return (Packet4i)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0);
+}
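+
+// ilvr_d interleaves the low 64-bit halves of its operands, so both ploaddup
+// variants above produce { from[0], from[0], from[1], from[1] }: each of the
+// first two scalars duplicated into an adjacent pair.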
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w(from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w(from, to, 0);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ float f = *from;
+ Packet4f v = { f, f, f, f };
+ v[1] = from[stride];
+ v[2] = from[2 * stride];
+ v[3] = from[3 * stride];
+ return v;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ int32_t i = *from;
+ Packet4i v = { i, i, i, i };
+ v[1] = from[stride];
+ v[2] = from[2 * stride];
+ v[3] = from[3 * stride];
+ return v;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = from[0];
+ to += stride;
+ *to = from[1];
+ to += stride;
+ *to = from[2];
+ to += stride;
+ *to = from[3];
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = from[0];
+ to += stride;
+ *to = from[1];
+ to += stride;
+ *to = from[2];
+ to += stride;
+ *to = from[3];
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) {
+ EIGEN_MSA_DEBUG;
+
+ __builtin_prefetch(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) {
+ EIGEN_MSA_DEBUG;
+
+ __builtin_prefetch(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(3, 2, 1, 0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(3, 2, 1, 0));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet4f)__builtin_msa_bclri_w((v4u32)a, 31);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i zero = __builtin_msa_ldi_w(0);
+ return __builtin_msa_add_a_w(zero, a);
+}
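+
+// add_a_w sums the absolute values of its operands per lane, so |0| + |a|
+// yields |a| (illustrative: pabs({-3, 5, -7, 9}) -> {3, 5, 7, 9}).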
+
+template <>
+EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f s = padd(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ s = padd(s, (Packet4f)__builtin_msa_shf_w((v4i32)s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return s[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i s = padd(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ s = padd(s, __builtin_msa_shf_w(s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return s[0];
+}
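+
+// A lane-by-lane trace of the shuffle reduction used by both predux overloads
+// above (illustrative):
+//   a              = [ a0, a1, a2, a3 ]
+//   a + shf(2,3,0,1) -> s = [ a0+a2, a1+a3, a2+a0, a3+a1 ]
+//   s + shf(1,0,3,2) -> every lane holds a0+a1+a2+a3
+// so returning lane 0 yields the horizontal sum.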
+
+// Other reduction functions:
+// mul
+template <>
+EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4f p = pmul(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ p = pmul(p, (Packet4f)__builtin_msa_shf_w((v4i32)p, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return p[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i p = pmul(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ p = pmul(p, __builtin_msa_shf_w(p, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return p[0];
+}
+
+// min
+template <>
+EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ // Swap 64-bit halves of a.
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+#if !EIGEN_FAST_MATH
+ // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit
+ // masks of all zeroes/ones in low 64 bits.
+ v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped);
+ // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes.
+ unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0);
+#endif
+ // Continue with min computation.
+ Packet4f v = __builtin_msa_fmin_w(a, swapped);
+ v = __builtin_msa_fmin_w(
+ v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+#if !EIGEN_FAST_MATH
+ // Based on the mask select between v and 4 qNaNs.
+ v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000);
+ v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v);
+#endif
+ return v[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i m = pmin(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ m = pmin(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return m[0];
+}
+
+// max
+template <>
+EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ // Swap 64-bit halves of a.
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+#if !EIGEN_FAST_MATH
+ // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit
+ // masks of all zeroes/ones in low 64 bits.
+ v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped);
+ // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes.
+ unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0);
+#endif
+ // Continue with max computation.
+ Packet4f v = __builtin_msa_fmax_w(a, swapped);
+ v = __builtin_msa_fmax_w(
+ v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+#if !EIGEN_FAST_MATH
+ // Based on the mask select between v and 4 qNaNs.
+ v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000);
+ v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v);
+#endif
+ return v[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet4i m = pmax(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
+ m = pmax(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
+ return m[0];
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet4f, 4>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl
+ << " " << value.packet[1] << "," << std::endl
+ << " " << value.packet[2] << "," << std::endl
+ << " " << value.packet[3] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f, 4>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ v4i32 tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = __builtin_msa_ilvr_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]);
+ tmp2 = __builtin_msa_ilvr_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]);
+ tmp3 = __builtin_msa_ilvl_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]);
+ tmp4 = __builtin_msa_ilvl_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]);
+
+ kernel.packet[0] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[1] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[2] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3);
+ kernel.packet[3] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet4i, 4>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl
+ << " " << value.packet[1] << "," << std::endl
+ << " " << value.packet[2] << "," << std::endl
+ << " " << value.packet[3] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4i, 4>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ v4i32 tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = __builtin_msa_ilvr_w(kernel.packet[1], kernel.packet[0]);
+ tmp2 = __builtin_msa_ilvr_w(kernel.packet[3], kernel.packet[2]);
+ tmp3 = __builtin_msa_ilvl_w(kernel.packet[1], kernel.packet[0]);
+ tmp4 = __builtin_msa_ilvl_w(kernel.packet[3], kernel.packet[2]);
+
+ kernel.packet[0] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[1] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1);
+ kernel.packet[2] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3);
+ kernel.packet[3] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsqrt_w(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f prsqrt(const Packet4f& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ return __builtin_msa_frsqrt_w(a);
+#else
+ Packet4f ones = __builtin_msa_ffint_s_w(__builtin_msa_ldi_w(1));
+ return pdiv(ones, psqrt(a));
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) {
+ Packet4f v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.w %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
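+
+// Note on the MSACSR trick shared by pfloor/pceil/pround (assuming the usual
+// MSA encoding of the rounding mode in bits 1:0 of MSACSR: 0 = nearest-even,
+// 1 = toward zero, 2 = toward +inf, 3 = toward -inf):
+//   pfloor:  mode = old | 3        -> 3 (toward -inf)
+//   pceil:   mode = (old | 3) ^ 1  -> 2 (toward +inf)
+//   pround:  mode = (old | 3) ^ 3  -> 0 (nearest, ties to even)
+// frint.w/frint.d then round under that mode before the original MSACSR is restored.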
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) {
+ Packet4f v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.w %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) {
+ Packet4f v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.w %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket,
+ const Packet4f& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2],
+ ifPacket.select[3] };
+ Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0);
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket,
+ const Packet4i& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2],
+ ifPacket.select[3] };
+ Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0);
+ return (Packet4i)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+}
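+
+// Sketch of the blend semantics shared by both pblend overloads above:
+// ceqi_w builds an all-ones lane mask where the selector is zero, and bsel
+// routes elsePacket through masked lanes and thenPacket through the rest, i.e.
+//   result[i] = ifPacket.select[i] ? thenPacket[i] : elsePacket[i];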
+
+//---------- double ----------
+
+typedef v2f64 Packet2d;
+typedef v2i64 Packet2l;
+typedef v2u64 Packet2ul;
+
+#define _EIGEN_DECLARE_CONST_Packet2d(NAME, X) const Packet2d p2d_##NAME = { X, X }
+#define _EIGEN_DECLARE_CONST_Packet2l(NAME, X) const Packet2l p2l_##NAME = { X, X }
+#define _EIGEN_DECLARE_CONST_Packet2ul(NAME, X) const Packet2ul p2ul_##NAME = { X, X }
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2d& value) {
+ os << "[ " << value[0] << ", " << value[1] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2l& value) {
+ os << "[ " << value[0] << ", " << value[1] << " ]";
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const Packet2ul& value) {
+ os << "[ " << value[0] << ", " << value[1] << " ]";
+ return os;
+}
+
+template <>
+struct packet_traits<double> : default_packet_traits {
+ typedef Packet2d type;
+ typedef Packet2d half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 0,
+ // FIXME check the Has*
+ HasDiv = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasRound = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasBlend = 1
+ };
+};
+
+template <>
+struct unpacket_traits<Packet2d> {
+ typedef double type;
+ enum { size = 2, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false };
+ typedef Packet2d half;
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d value = { from, from };
+ return value;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fadd_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) {
+ EIGEN_MSA_DEBUG;
+
+ static const Packet2d countdown = { 0.0, 1.0 };
+ return padd(pset1<Packet2d>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsub_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_bnegi_d((v2u64)a, 63);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmul_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fdiv_d(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fmadd_d(c, a, b);
+}
+
+// Logical operations are not supported for double, so we have to reinterpret the
+// operands as integer vectors using MSA intrinsics
+template <>
+EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_and_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_or_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+ return pand(a, (Packet2d)__builtin_msa_xori_b((v16u8)b, 255));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast<double*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmin_d(a, b);
+#else
+ // This prefers NaNs to numbers.
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a);
+ v2i64 aMinOrNaN = por(__builtin_msa_fclt_d(a, b), aNaN);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ // This prefers numbers to NaNs.
+ return __builtin_msa_fmax_d(a, b);
+#else
+ // This prefers NaNs to numbers.
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a);
+ v2i64 aMaxOrNaN = por(__builtin_msa_fclt_d(b, a), aNaN);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a);
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast<double*>(from), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d value = { *from, *from };
+ return value;
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
+ EIGEN_MSA_DEBUG;
+
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d value;
+ value[0] = *from;
+ from += stride;
+ value[1] = *from;
+ return value;
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from,
+ Index stride) {
+ EIGEN_MSA_DEBUG;
+
+ *to = from[0];
+ to += stride;
+ *to = from[1];
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) {
+ EIGEN_MSA_DEBUG;
+
+ __builtin_prefetch(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return a[0];
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return (Packet2d)__builtin_msa_bclri_d((v2u64)a, 63);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d s = padd(a, preverse(a));
+ return s[0];
+}
+
+// Other reduction functions:
+// mul
+template <>
+EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d p = pmul(a, preverse(a));
+ return p[0];
+}
+
+// min
+template <>
+EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ Packet2d v = __builtin_msa_fmin_d(a, swapped);
+ return v[0];
+#else
+ double a0 = a[0], a1 = a[1];
+ return ((numext::isnan)(a0) || a0 < a1) ? a0 : a1;
+#endif
+}
+
+// max
+template <>
+EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ Packet2d v = __builtin_msa_fmax_d(a, swapped);
+ return v[0];
+#else
+ double a0 = a[0], a1 = a[1];
+ return ((numext::isnan)(a0) || a0 > a1) ? a0 : a1;
+#endif
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsqrt_d(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d prsqrt(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+#if EIGEN_FAST_MATH
+ return __builtin_msa_frsqrt_d(a);
+#else
+ Packet2d ones = __builtin_msa_ffint_s_d(__builtin_msa_ldi_d(1));
+ return pdiv(ones, psqrt(a));
+#endif
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet2d, 2>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl << " " << value.packet[1] << " ]";
+ return os;
+}
+
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2d, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d trn1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]);
+ Packet2d trn2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]);
+ kernel.packet[0] = trn1;
+ kernel.packet[1] = trn2;
+}
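+
+// ilvev_d/ilvod_d pick the even/odd 64-bit lanes of the operand pair, so for
+//   kernel = [[ a0, a1 ],
+//             [ b0, b1 ]]
+// the result is the 2x2 transpose [[ a0, b0 ], [ a1, b1 ]] (illustrative trace).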
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket,
+ const Packet2d& elsePacket) {
+ Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+ Packet2l mask = __builtin_msa_ceqi_d((Packet2l)select, 0);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_MSA_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/Complex.h
index ef50ba303..f40af7f87 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/Complex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/Complex.h
@@ -15,9 +15,10 @@ namespace Eigen {
namespace internal {
-inline uint32x4_t p4ui_CONJ_XOR() {
+inline uint32x4_t p4ui_CONJ_XOR()
+{
// See bug 1325, clang fails to call vld1q_u64.
-#if EIGEN_COMP_CLANG
+#if EIGEN_COMP_CLANG || EIGEN_COMP_CASTXML
uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
return ret;
#else
@@ -26,61 +27,136 @@ inline uint32x4_t p4ui_CONJ_XOR() {
#endif
}
-inline uint32x2_t p2ui_CONJ_XOR() {
+inline uint32x2_t p2ui_CONJ_XOR()
+{
static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 };
return vld1_u32( conj_XOR_DATA );
}
//---------- float ----------
+
+struct Packet1cf
+{
+ EIGEN_STRONG_INLINE Packet1cf() {}
+ EIGEN_STRONG_INLINE explicit Packet1cf(const Packet2f& a) : v(a) {}
+ Packet2f v;
+};
struct Packet2cf
{
EIGEN_STRONG_INLINE Packet2cf() {}
EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
- Packet4f v;
+ Packet4f v;
};
-template<> struct packet_traits<std::complex<float> > : default_packet_traits
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
{
typedef Packet2cf type;
- typedef Packet2cf half;
- enum {
+ typedef Packet1cf half;
+ enum
+ {
Vectorizable = 1,
AlignedOnScalar = 1,
size = 2,
- HasHalfPacket = 0,
-
- HasAdd = 1,
- HasSub = 1,
- HasMul = 1,
- HasDiv = 1,
- HasNegate = 1,
- HasAbs = 0,
- HasAbs2 = 0,
- HasMin = 0,
- HasMax = 0,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
HasSetLinear = 0
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
-
-template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+template<> struct unpacket_traits<Packet1cf>
{
- float32x2_t r64;
- r64 = vld1_f32((float *)&from);
+ typedef std::complex<float> type;
+ typedef Packet1cf half;
+ typedef Packet2f as_real;
+ enum
+ {
+ size = 1,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet2cf>
+{
+ typedef std::complex<float> type;
+ typedef Packet1cf half;
+ typedef Packet4f as_real;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet1cf pcast<float,Packet1cf>(const float& a)
+{ return Packet1cf(vset_lane_f32(a, vdup_n_f32(0.f), 0)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pcast<Packet2f,Packet2cf>(const Packet2f& a)
+{ return Packet2cf(vreinterpretq_f32_u64(vmovl_u32(vreinterpret_u32_f32(a)))); }
+template<> EIGEN_STRONG_INLINE Packet1cf pset1<Packet1cf>(const std::complex<float>& from)
+{ return Packet1cf(vld1_f32(reinterpret_cast<const float*>(&from))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+ const float32x2_t r64 = vld1_f32(reinterpret_cast<const float*>(&from));
return Packet2cf(vcombine_f32(r64, r64));
}
-template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cf padd<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{ return Packet1cf(padd<Packet2f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(padd<Packet4f>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cf psub<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{ return Packet1cf(psub<Packet2f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(psub<Packet4f>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cf pnegate(const Packet1cf& a) { return Packet1cf(pnegate<Packet2f>(a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate<Packet4f>(a.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cf pconj(const Packet1cf& a)
+{
+ const Packet2ui b = vreinterpret_u32_f32(a.v);
+ return Packet1cf(vreinterpret_f32_u32(veor_u32(b, p2ui_CONJ_XOR())));
+}
template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
{
- Packet4ui b = vreinterpretq_u32_f32(a.v);
+ const Packet4ui b = vreinterpretq_u32_f32(a.v);
return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR())));
}
+template<> EIGEN_STRONG_INLINE Packet1cf pmul<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{
+ Packet2f v1, v2;
+
+ // Get the real values of a | a1_re | a1_re |
+ v1 = vdup_lane_f32(a.v, 0);
+ // Get the imag values of a | a1_im | a1_im |
+ v2 = vdup_lane_f32(a.v, 1);
+ // Multiply the real a with b
+ v1 = vmul_f32(v1, b.v);
+ // Multiply the imag a with b
+ v2 = vmul_f32(v2, b.v);
+ // Conjugate v2
+ v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
+ // Swap real/imag elements in v2.
+ v2 = vrev64_f32(v2);
+ // Add and return the result
+ return Packet1cf(vadd_f32(v1, v2));
+}
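+
+// The interleaved [re, im] layout makes the lane algebra above implement
+// (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br):
+//   v1 = [ ar*br, ar*bi ]
+//   v2 = [ ai*br, ai*bi ] --conj--> [ ai*br, -ai*bi ] --rev64--> [ -ai*bi, ai*br ]
+//   v1 + v2 = [ ar*br - ai*bi, ar*bi + ai*br ]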
template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
Packet4f v1, v2;
@@ -93,7 +169,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
v1 = vmulq_f32(v1, b.v);
// Multiply the imag a with b
v2 = vmulq_f32(v2, b.v);
- // Conjugate v2
+ // Conjugate v2
v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR()));
// Swap real/imag elements in v2.
v2 = vrev64q_f32(v2);
@@ -101,98 +177,144 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
return Packet2cf(vaddq_f32(v1, v2));
}
-template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+template<> EIGEN_STRONG_INLINE Packet1cf pcmp_eq(const Packet1cf& a, const Packet1cf& b)
{
- return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a[0])==re(b[0]), im(a[0])==im(b[0])]
+ Packet2f eq = pcmp_eq<Packet2f>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a[0])==im(b[0]), re(a[0])==re(b[0])]
+ Packet2f eq_swapped = vrev64_f32(eq);
+ // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cf(pand<Packet2f>(eq, eq_swapped));
}
-template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
- return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
- return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
- return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b)
+{
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a[0])==re(b[0]), im(a[0])==im(b[0]), re(a[1])==re(b[1]), im(a[1])==im(b[1])]
+ Packet4f eq = pcmp_eq<Packet4f>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a[0])==im(b[0]), re(a[0])==re(b[0]), im(a[1])==im(b[1]), re(a[1])==re(b[1])]
+ Packet4f eq_swapped = vrev64q_f32(eq);
+ // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet2cf(pand<Packet4f>(eq, eq_swapped));
}
-template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cf pand<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{ return Packet1cf(vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+template<> EIGEN_STRONG_INLINE Packet1cf por<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{ return Packet1cf(vreinterpret_f32_u32(vorr_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+template<> EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE Packet1cf pxor<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{ return Packet1cf(vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
+template<> EIGEN_STRONG_INLINE Packet1cf pandnot<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+{ return Packet1cf(vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cf pload<Packet1cf>(const std::complex<float>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cf(pload<Packet2f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(reinterpret_cast<const float*>(from))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cf ploadu<Packet1cf>(const std::complex<float>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cf(ploadu<Packet2f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(reinterpret_cast<const float*>(from))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cf ploaddup<Packet1cf>(const std::complex<float>* from)
+{ return pset1<Packet1cf>(*from); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from)
+{ return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *to, const Packet1cf& from)
+{ EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *to, const Packet2cf& from)
+{ EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast<float*>(to), from.v); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *to, const Packet1cf& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *to, const Packet2cf& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<float*>(to), from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet1cf pgather<std::complex<float>, Packet1cf>(
+ const std::complex<float>* from, Index stride)
+{
+ const Packet2f tmp = vdup_n_f32(std::real(from[0*stride]));
+ return Packet1cf(vset_lane_f32(std::imag(from[0*stride]), tmp, 1));
+}
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(
+ const std::complex<float>* from, Index stride)
{
- Packet4f res = pset1<Packet4f>(0.f);
- res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
+ Packet4f res = vdupq_n_f32(std::real(from[0*stride]));
res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1);
res = vsetq_lane_f32(std::real(from[1*stride]), res, 2);
res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3);
return Packet2cf(res);
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet1cf>(
+ std::complex<float>* to, const Packet1cf& from, Index stride)
+{ to[stride*0] = std::complex<float>(vget_lane_f32(from.v, 0), vget_lane_f32(from.v, 1)); }
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(
+ std::complex<float>* to, const Packet2cf& from, Index stride)
{
to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
}
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((float *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *addr)
+{ EIGEN_ARM_PREFETCH(reinterpret_cast<const float*>(addr)); }
-template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet1cf>(const Packet1cf& a)
{
- std::complex<float> EIGEN_ALIGN16 x[2];
- vst1q_f32((float *)x, a.v);
+ EIGEN_ALIGN16 std::complex<float> x;
+ vst1_f32(reinterpret_cast<float*>(&x), a.v);
+ return x;
+}
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+ EIGEN_ALIGN16 std::complex<float> x[2];
+ vst1q_f32(reinterpret_cast<float*>(x), a.v);
return x[0];
}
+template<> EIGEN_STRONG_INLINE Packet1cf preverse(const Packet1cf& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
-{
- float32x2_t a_lo, a_hi;
- Packet4f a_r128;
-
- a_lo = vget_low_f32(a.v);
- a_hi = vget_high_f32(a.v);
- a_r128 = vcombine_f32(a_hi, a_lo);
-
- return Packet2cf(a_r128);
-}
+{ return Packet2cf(vcombine_f32(vget_high_f32(a.v), vget_low_f32(a.v))); }
+template<> EIGEN_STRONG_INLINE Packet1cf pcplxflip<Packet1cf>(const Packet1cf& a)
+{ return Packet1cf(vrev64_f32(a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)
+{ return Packet2cf(vrev64q_f32(a.v)); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet1cf>(const Packet1cf& a)
{
- return Packet2cf(vrev64q_f32(a.v));
+ std::complex<float> s;
+ vst1_f32((float *)&s, a.v);
+ return s;
}
-
template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
{
- float32x2_t a1, a2;
std::complex<float> s;
-
- a1 = vget_low_f32(a.v);
- a2 = vget_high_f32(a.v);
- a2 = vadd_f32(a1, a2);
- vst1_f32((float *)&s, a2);
-
+ vst1_f32(reinterpret_cast<float*>(&s), vadd_f32(vget_low_f32(a.v), vget_high_f32(a.v)));
return s;
}
-template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet1cf>(const Packet1cf& a)
{
- Packet4f sum1, sum2, sum;
-
- // Add the first two 64-bit float32x2_t of vecs[0]
- sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
- sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
- sum = vaddq_f32(sum1, sum2);
-
- return Packet2cf(sum);
+ std::complex<float> s;
+ vst1_f32((float *)&s, a.v);
+ return s;
}
-
template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
{
float32x2_t a1, a2, v1, v2, prod;
@@ -208,90 +330,67 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const P
v1 = vmul_f32(v1, a2);
// Multiply the imag a with b
v2 = vmul_f32(v2, a2);
- // Conjugate v2
+ // Conjugate v2
v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
// Swap real/imag elements in v2.
v2 = vrev64_f32(v2);
// Add v1, v2
prod = vadd_f32(v1, v2);
- vst1_f32((float *)&s, prod);
+ vst1_f32(reinterpret_cast<float*>(&s), prod);
return s;
}
-template<int Offset>
-struct palign_impl<Offset,Packet2cf>
-{
- EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
- {
- if (Offset==1)
- {
- first.v = vextq_f32(first.v, second.v, 2);
- }
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cf,Packet2f)
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
-template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+template<> EIGEN_STRONG_INLINE Packet1cf pdiv<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
+ // TODO optimize it for NEON
+ Packet1cf res = pmul(a, pconj(b));
+ Packet2f s, rev_s;
-EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+ // this computes the norm
+ s = vmul_f32(b.v, b.v);
+ rev_s = vrev64_f32(s);
+ return Packet1cf(pdiv<Packet2f>(res.v, vadd_f32(s, rev_s)));
+}
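+
+// Math behind these complex divisions (a sketch): a/b = a*conj(b) / |b|^2.
+// s = b.v*b.v holds [re(b)^2, im(b)^2] per complex lane, so s + rev64(s)
+// broadcasts |b|^2 into both halves; the final pdiv then divides the
+// numerator a*conj(b) by it componentwise.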
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
// TODO optimize it for NEON
- Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+ Packet2cf res = pmul(a,pconj(b));
Packet4f s, rev_s;
// this computes the norm
s = vmulq_f32(b.v, b.v);
rev_s = vrev64q_f32(s);
- return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));
+ return Packet2cf(pdiv<Packet4f>(res.v, vaddq_f32(s, rev_s)));
}
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet2cf,2>& kernel) {
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1cf, 1>& /*kernel*/) {}
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2cf, 2>& kernel)
+{
Packet4f tmp = vcombine_f32(vget_high_f32(kernel.packet[0].v), vget_high_f32(kernel.packet[1].v));
kernel.packet[0].v = vcombine_f32(vget_low_f32(kernel.packet[0].v), vget_low_f32(kernel.packet[1].v));
kernel.packet[1].v = tmp;
}
+template<> EIGEN_STRONG_INLINE Packet1cf psqrt<Packet1cf>(const Packet1cf& a) {
+ return psqrt_complex<Packet1cf>(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf psqrt<Packet2cf>(const Packet2cf& a) {
+ return psqrt_complex<Packet2cf>(a);
+}
+
//---------- double ----------
#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
// See bug 1325, clang fails to call vld1q_u64.
-#if EIGEN_COMP_CLANG
+#if EIGEN_COMP_CLANG || EIGEN_COMP_CASTXML
static uint64x2_t p2ul_CONJ_XOR = {0x0, 0x8000000000000000};
#else
const uint64_t p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 };
@@ -309,7 +408,8 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
{
typedef Packet1cd type;
typedef Packet1cd half;
- enum {
+ enum
+ {
Vectorizable = 1,
AlignedOnScalar = 0,
size = 1,
@@ -328,24 +428,50 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd>
+{
+ typedef std::complex<double> type;
+ typedef Packet1cd half;
+ typedef Packet2d as_real;
+ enum
+ {
+ size=1,
+ alignment=Aligned16,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>(reinterpret_cast<const double*>(from))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>(reinterpret_cast<const double*>(from))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+{
+ /* here we really have to use unaligned loads :( */
+ return ploadu<Packet1cd>(&from);
+}
-template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
-template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(padd<Packet2d>(a.v, b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
-{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(psub<Packet2d>(a.v, b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(padd<Packet2d>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(psub<Packet2d>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate<Packet2d>(a.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR))); }
+template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a)
+{ return Packet1cd(pnegate<Packet2d>(a.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)
+{ return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR))); }
template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
Packet2d v1, v2;
- // Get the real values of a
+ // Get the real values of a
v1 = vdupq_lane_f64(vget_low_f64(a.v), 0);
// Get the imag values of a
v2 = vdupq_lane_f64(vget_high_f64(a.v), 0);
@@ -353,7 +479,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
v1 = vmulq_f64(v1, b.v);
// Multiply the imag a with b
v2 = vmulq_f64(v2, b.v);
- // Conjugate v2
+ // Conjugate v2
v2 = vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(v2), p2ul_CONJ_XOR));
// Swap real/imag elements in v2.
v2 = preverse<Packet2d>(v2);
@@ -361,31 +487,44 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
return Packet1cd(vaddq_f64(v1, v2));
}
-template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
- return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
- return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b)
{
- return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a)==re(b), im(a)==im(b)]
+ Packet2d eq = pcmp_eq<Packet2d>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a)==im(b), re(a)==re(b)]
+ Packet2d eq_swapped = vreinterpretq_f64_u32(vrev64q_u32(vreinterpretq_u32_f64(eq)));
+ // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cd(pand<Packet2d>(eq, eq_swapped));
}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
- return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
+{ return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from)
+{ return pset1<Packet1cd>(*from); }
-template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *to, const Packet1cd& from)
+{ EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast<double*>(to), from.v); }
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *to, const Packet1cd& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), from.v); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((double *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *addr)
+{ EIGEN_ARM_PREFETCH(reinterpret_cast<const double*>(addr)); }
-template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(
+ const std::complex<double>* from, Index stride)
{
Packet2d res = pset1<Packet2d>(0.0);
res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);
@@ -393,17 +532,14 @@ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Pack
return Packet1cd(res);
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
-{
- to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1));
-}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(
+ std::complex<double>* to, const Packet1cd& from, Index stride)
+{ to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1)); }
-
-template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
- std::complex<double> EIGEN_ALIGN16 res;
+ EIGEN_ALIGN16 std::complex<double> res;
pstore<std::complex<double> >(&res, a);
-
return res;
}
@@ -411,59 +547,14 @@ template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a
template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
-template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs) { return vecs[0]; }
-
template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
-template<int Offset>
-struct palign_impl<Offset,Packet1cd>
-{
- static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
- {
- // FIXME is it sure we never have to align a Packet1cd?
- // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
-
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
// TODO optimize it for NEON
- Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+ Packet1cd res = pmul(a,pconj(b));
Packet2d s = pmul<Packet2d>(b.v, b.v);
Packet2d rev_s = preverse<Packet2d>(s);
@@ -471,9 +562,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, con
}
EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
-{
- return Packet1cd(preverse(Packet2d(x.v)));
-}
+{ return Packet1cd(preverse(Packet2d(x.v))); }
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
{
@@ -481,6 +570,11 @@ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
kernel.packet[0].v = vcombine_f64(vget_low_f64(kernel.packet[0].v), vget_low_f64(kernel.packet[1].v));
kernel.packet[1].v = tmp;
}
+
+template<> EIGEN_STRONG_INLINE Packet1cd psqrt<Packet1cd>(const Packet1cd& a) {
+ return psqrt_complex<Packet1cd>(a);
+}
+
#endif // EIGEN_ARCH_ARM64
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h
new file mode 100644
index 000000000..3481f337e
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h
@@ -0,0 +1,183 @@
+namespace Eigen {
+namespace internal {
+
+#if EIGEN_ARCH_ARM && EIGEN_COMP_CLANG
+
+// Clang seems to excessively spill registers in the GEBP kernel on 32-bit arm.
+// Here we specialize gebp_traits to eliminate these register spills.
+// See #2138.
+template<>
+struct gebp_traits <float,float,false,false,Architecture::NEON,GEBPPacketFull>
+ : gebp_traits<float,float,false,false,Architecture::Generic,GEBPPacketFull>
+{
+ EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ {
+    // This volatile inline ASM both acts as a barrier to prevent reordering
+    // and enforces strict register use.
+ asm volatile(
+ "vmla.f32 %q[r], %q[c], %q[alpha]"
+ : [r] "+w" (r)
+ : [c] "w" (c),
+ [alpha] "w" (alpha)
+ : );
+ }
+
+ template <typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const Packet4f& a, const Packet4f& b,
+ Packet4f& c, Packet4f& tmp,
+ const LaneIdType&) const {
+ acc(a, b, c);
+ }
+
+ template <typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const Packet4f& a, const QuadPacket<Packet4f>& b,
+ Packet4f& c, Packet4f& tmp,
+ const LaneIdType& lane) const {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+};
+
+#endif // EIGEN_ARCH_ARM && EIGEN_COMP_CLANG
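
// A minimal standalone sketch of the barrier trick used above, assuming a
// 32-bit ARM/NEON target and GCC/Clang extended asm. The volatile asm keeps
// the accumulator pinned in a single q register and stops the compiler from
// splitting the multiply-accumulate into separate VMUL+VADD with spills.
#include <arm_neon.h>

static inline float32x4_t acc_no_spill(float32x4_t r, float32x4_t c, float32x4_t alpha)
{
  // r += c * alpha as a single VMLA; the "+w" constraint ties r to a NEON register.
  asm volatile("vmla.f32 %q[r], %q[c], %q[alpha]"
               : [r] "+w"(r)
               : [c] "w"(c), [alpha] "w"(alpha));
  return r;
}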
+
+#if EIGEN_ARCH_ARM64
+
+template<>
+struct gebp_traits <float,float,false,false,Architecture::NEON,GEBPPacketFull>
+ : gebp_traits<float,float,false,false,Architecture::Generic,GEBPPacketFull>
+{
+ typedef float RhsPacket;
+ typedef float32x4_t RhsPacketx4;
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ dest = vld1q_f32(b);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ {
+ c = vfmaq_n_f32(c, a, b);
+ }
+
+ // NOTE: Template parameter inference failed when compiled with Android NDK:
+ // "candidate template ignored: could not match 'FixedInt<N>' against 'Eigen::internal::FixedInt<0>".
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ { madd_helper<0>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const
+ { madd_helper<1>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const
+ { madd_helper<2>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const
+ { madd_helper<3>(a, b, c); }
+
+ private:
+ template<int LaneID>
+ EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const
+ {
+ #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101
+ // vfmaq_laneq_f32 is implemented through a costly dup
+ if(LaneID==0) asm("fmla %0.4s, %1.4s, %2.s[0]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==1) asm("fmla %0.4s, %1.4s, %2.s[1]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==2) asm("fmla %0.4s, %1.4s, %2.s[2]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==3) asm("fmla %0.4s, %1.4s, %2.s[3]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ #else
+ c = vfmaq_laneq_f32(c, a, b, LaneID);
+ #endif
+ }
+};
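
// A minimal sketch of the lane-indexed FMA these overloads dispatch to,
// assuming an AArch64 target: vfmaq_laneq_f32 requires the lane index to be a
// compile-time constant, which is why the code above dispatches on FixedInt
// rather than passing the lane as a runtime value.
#include <arm_neon.h>

template <int Lane>
static inline float32x4_t fma_by_lane(float32x4_t c, float32x4_t a, float32x4_t b)
{
  // c[i] += a[i] * b[Lane] for all i; emits a single FMLA (by element).
  return vfmaq_laneq_f32(c, a, b, Lane);
}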
+
+
+template<>
+struct gebp_traits <double,double,false,false,Architecture::NEON>
+ : gebp_traits<double,double,false,false,Architecture::Generic>
+{
+ typedef double RhsPacket;
+
+ struct RhsPacketx4 {
+ float64x2_t B_0, B_1;
+ };
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ dest.B_0 = vld1q_f64(b);
+ dest.B_1 = vld1q_f64(b+2);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ {
+ c = vfmaq_n_f64(c, a, b);
+ }
+
+ // NOTE: Template parameter inference failed when compiled with Android NDK:
+ // "candidate template ignored: could not match 'FixedInt<N>' against 'Eigen::internal::FixedInt<0>".
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ { madd_helper<0>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const
+ { madd_helper<1>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const
+ { madd_helper<2>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const
+ { madd_helper<3>(a, b, c); }
+
+ private:
+ template <int LaneID>
+ EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const
+ {
+ #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101
+ // vfmaq_laneq_f64 is implemented through a costly dup
+ if(LaneID==0) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : );
+ else if(LaneID==1) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : );
+ else if(LaneID==2) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : );
+ else if(LaneID==3) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : );
+ #else
+ if(LaneID==0) c = vfmaq_laneq_f64(c, a, b.B_0, 0);
+ else if(LaneID==1) c = vfmaq_laneq_f64(c, a, b.B_0, 1);
+ else if(LaneID==2) c = vfmaq_laneq_f64(c, a, b.B_1, 0);
+ else if(LaneID==3) c = vfmaq_laneq_f64(c, a, b.B_1, 1);
+ #endif
+ }
+};
+
+#endif // EIGEN_ARCH_ARM64
+
+} // namespace internal
+} // namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/MathFunctions.h
index 6bb05bb92..fa6615a85 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/MathFunctions.h
@@ -5,10 +5,6 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/* The sin, cos, exp, and log functions of this file come from
- * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
- */
-
#ifndef EIGEN_MATH_FUNCTIONS_NEON_H
#define EIGEN_MATH_FUNCTIONS_NEON_H
@@ -16,74 +12,62 @@ namespace Eigen {
namespace internal {
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& _x)
-{
- Packet4f x = _x;
- Packet4f tmp, fx;
-
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
- _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
- _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
- x = vminq_f32(x, p4f_exp_hi);
- x = vmaxq_f32(x, p4f_exp_lo);
-
- /* express exp(x) as exp(g + n*log(2)) */
- fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF);
-
- /* perform a floorf */
- tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
-
- /* if greater, substract 1 */
- Packet4ui mask = vcgtq_f32(tmp, fx);
- mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1));
-
- fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
-
- tmp = vmulq_f32(fx, p4f_cephes_exp_C1);
- Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2);
- x = vsubq_f32(x, tmp);
- x = vsubq_f32(x, z);
-
- Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x);
- z = vmulq_f32(x, x);
- y = vaddq_f32(y, p4f_cephes_exp_p1);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p2);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p3);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p4);
- y = vmulq_f32(y, x);
- y = vaddq_f32(y, p4f_cephes_exp_p5);
-
- y = vmulq_f32(y, z);
- y = vaddq_f32(y, x);
- y = vaddq_f32(y, p4f_1);
-
- /* build 2^n */
- int32x4_t mm;
- mm = vcvtq_s32_f32(fx);
- mm = vaddq_s32(mm, p4i_0x7f);
- mm = vshlq_n_s32(mm, 23);
- Packet4f pow2n = vreinterpretq_f32_s32(mm);
-
- y = vmulq_f32(y, pow2n);
- return y;
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f pexp<Packet2f>(const Packet2f& x)
+{ return pexp_float(x); }
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp<Packet4f>(const Packet4f& x)
+{ return pexp_float(x); }
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f plog<Packet2f>(const Packet2f& x)
+{ return plog_float(x); }
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog<Packet4f>(const Packet4f& x)
+{ return plog_float(x); }
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f psin<Packet2f>(const Packet2f& x)
+{ return psin_float(x); }
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psin<Packet4f>(const Packet4f& x)
+{ return psin_float(x); }
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f pcos<Packet2f>(const Packet2f& x)
+{ return pcos_float(x); }
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pcos<Packet4f>(const Packet4f& x)
+{ return pcos_float(x); }
+
+// Hyperbolic Tangent function.
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f ptanh<Packet2f>(const Packet2f& x)
+{ return internal::generic_fast_tanh_float(x); }
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f ptanh<Packet4f>(const Packet4f& x)
+{ return internal::generic_fast_tanh_float(x); }
+
+BF16_PACKET_FUNCTION(Packet4f, Packet4bf, psin)
+BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pcos)
+BF16_PACKET_FUNCTION(Packet4f, Packet4bf, plog)
+BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pexp)
+BF16_PACKET_FUNCTION(Packet4f, Packet4bf, ptanh)
+
+template <>
+EIGEN_STRONG_INLINE Packet4bf pfrexp(const Packet4bf& a, Packet4bf& exponent) {
+ Packet4f fexponent;
+ const Packet4bf out = F32ToBf16(pfrexp<Packet4f>(Bf16ToF32(a), fexponent));
+ exponent = F32ToBf16(fexponent);
+ return out;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4bf pldexp(const Packet4bf& a, const Packet4bf& exponent) {
+ return F32ToBf16(pldexp<Packet4f>(Bf16ToF32(a), Bf16ToF32(exponent)));
}
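
// A scalar sketch of the round-trip pattern above: do the arithmetic in float,
// then narrow back to bfloat16. The helpers below are simplified stand-ins for
// Bf16ToF32/F32ToBf16 (plain truncation instead of round-to-nearest) and only
// illustrate the shape of pfrexp for Packet4bf, not Eigen's actual conversion.
#include <cmath>
#include <cstdint>
#include <cstring>

static inline float bf16_to_f32(uint16_t h)
{
  uint32_t bits = static_cast<uint32_t>(h) << 16; // bf16 is the top half of a float
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

static inline uint16_t f32_to_bf16(float f) // truncating; Eigen rounds to nearest
{
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return static_cast<uint16_t>(bits >> 16);
}

static inline uint16_t bf16_frexp(uint16_t a, uint16_t& exponent)
{
  int e;
  float m = std::frexp(bf16_to_f32(a), &e);      // do the work in float
  exponent = f32_to_bf16(static_cast<float>(e)); // exponent handed back as bf16 too
  return f32_to_bf16(m);
}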
+//---------- double ----------
+
+#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp<Packet2d>(const Packet2d& x)
+{ return pexp_double(x); }
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d plog<Packet2d>(const Packet2d& x)
+{ return plog_double(x); }
+
+#endif
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/PacketMath.h
index 836fbc0dd..6996cc8d3 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -24,23 +24,118 @@ namespace internal {
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
-#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
-#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
-#endif
-
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#if EIGEN_ARCH_ARM64
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#else
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
#endif
-typedef float32x2_t Packet2f;
-typedef float32x4_t Packet4f;
-typedef int32x4_t Packet4i;
-typedef int32x2_t Packet2i;
-typedef uint32x4_t Packet4ui;
+#if EIGEN_COMP_MSVC_STRICT
+
+// In MSVC's arm_neon.h header file, all NEON vector types
+// are aliases to the same underlying type __n128.
+// We thus have to wrap them to make them different C++ types.
+// (See also bug 1428)
+typedef eigen_packet_wrapper<float32x2_t,0> Packet2f;
+typedef eigen_packet_wrapper<float32x4_t,1> Packet4f;
+typedef eigen_packet_wrapper<int32_t ,2> Packet4c;
+typedef eigen_packet_wrapper<int8x8_t ,3> Packet8c;
+typedef eigen_packet_wrapper<int8x16_t ,4> Packet16c;
+typedef eigen_packet_wrapper<uint32_t ,5> Packet4uc;
+typedef eigen_packet_wrapper<uint8x8_t ,6> Packet8uc;
+typedef eigen_packet_wrapper<uint8x16_t ,7> Packet16uc;
+typedef eigen_packet_wrapper<int16x4_t ,8> Packet4s;
+typedef eigen_packet_wrapper<int16x8_t ,9> Packet8s;
+typedef eigen_packet_wrapper<uint16x4_t ,10> Packet4us;
+typedef eigen_packet_wrapper<uint16x8_t ,11> Packet8us;
+typedef eigen_packet_wrapper<int32x2_t ,12> Packet2i;
+typedef eigen_packet_wrapper<int32x4_t ,13> Packet4i;
+typedef eigen_packet_wrapper<uint32x2_t ,14> Packet2ui;
+typedef eigen_packet_wrapper<uint32x4_t ,15> Packet4ui;
+typedef eigen_packet_wrapper<int64x2_t ,16> Packet2l;
+typedef eigen_packet_wrapper<uint64x2_t ,17> Packet2ul;
+
+#else
+
+typedef float32x2_t Packet2f;
+typedef float32x4_t Packet4f;
+typedef eigen_packet_wrapper<int32_t ,2> Packet4c;
+typedef int8x8_t Packet8c;
+typedef int8x16_t Packet16c;
+typedef eigen_packet_wrapper<uint32_t ,5> Packet4uc;
+typedef uint8x8_t Packet8uc;
+typedef uint8x16_t Packet16uc;
+typedef int16x4_t Packet4s;
+typedef int16x8_t Packet8s;
+typedef uint16x4_t Packet4us;
+typedef uint16x8_t Packet8us;
+typedef int32x2_t Packet2i;
+typedef int32x4_t Packet4i;
+typedef uint32x2_t Packet2ui;
+typedef uint32x4_t Packet4ui;
+typedef int64x2_t Packet2l;
+typedef uint64x2_t Packet2ul;
+
+#endif // EIGEN_COMP_MSVC_STRICT
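
// A sketch of the wrapper idea (the shape below is an assumption for
// illustration, not eigen_packet_wrapper's actual definition): a dummy integer
// parameter makes each alias a distinct C++ type, so overloads and template
// specializations on Packet4f, Packet4i, ... no longer collide even though
// MSVC maps every NEON vector type to the same underlying __n128.
template <typename T, int UniqueId>
struct packet_wrapper_sketch
{
  T m_val;
  packet_wrapper_sketch() {}
  packet_wrapper_sketch(const T& v) : m_val(v) {}
  operator T&() { return m_val; }             // implicit unwrap, so NEON
  operator const T&() const { return m_val; } // intrinsics can take the wrapper
};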
+
+EIGEN_STRONG_INLINE Packet4f shuffle1(const Packet4f& m, int mask){
+ const float* a = reinterpret_cast<const float*>(&m);
+ Packet4f res = {*(a + (mask & 3)), *(a + ((mask >> 2) & 3)), *(a + ((mask >> 4) & 3 )), *(a + ((mask >> 6) & 3))};
+ return res;
+}
+
+// Functionally equivalent to _mm_shuffle_ps in SSE when interleave
+// == false (i.e. shuffle2<false>(m, n, mask) equals _mm_shuffle_ps(m, n, mask));
+// interleaves m and n when interleave == true. Currently used in LU/arch/InverseSize4.h
+// to enable a shared implementation for fast inversion of matrices of size 4.
+template<bool interleave>
+EIGEN_STRONG_INLINE Packet4f shuffle2(const Packet4f &m, const Packet4f &n, int mask)
+{
+ const float* a = reinterpret_cast<const float*>(&m);
+ const float* b = reinterpret_cast<const float*>(&n);
+ Packet4f res = {*(a + (mask & 3)), *(a + ((mask >> 2) & 3)), *(b + ((mask >> 4) & 3)), *(b + ((mask >> 6) & 3))};
+ return res;
+}
+
+template<>
+EIGEN_STRONG_INLINE Packet4f shuffle2<true>(const Packet4f &m, const Packet4f &n, int mask)
+{
+ const float* a = reinterpret_cast<const float*>(&m);
+ const float* b = reinterpret_cast<const float*>(&n);
+ Packet4f res = {*(a + (mask & 3)), *(b + ((mask >> 2) & 3)), *(a + ((mask >> 4) & 3)), *(b + ((mask >> 6) & 3))};
+ return res;
+}
+
+EIGEN_STRONG_INLINE static int eigen_neon_shuffle_mask(int p, int q, int r, int s) { return (s << 6) | (r << 4) | (q << 2) | p; }
+
+EIGEN_STRONG_INLINE Packet4f vec4f_swizzle1(const Packet4f& a, int p, int q, int r, int s)
+{
+ return shuffle1(a, eigen_neon_shuffle_mask(p, q, r, s));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_swizzle2(const Packet4f& a, const Packet4f& b, int p, int q, int r, int s)
+{
+ return shuffle2<false>(a,b,eigen_neon_shuffle_mask(p, q, r, s));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b)
+{
+ return shuffle2<false>(a,b,eigen_neon_shuffle_mask(0, 1, 0, 1));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_movehl(const Packet4f& a, const Packet4f& b)
+{
+ return shuffle2<false>(b,a,eigen_neon_shuffle_mask(2, 3, 2, 3));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_unpacklo(const Packet4f& a, const Packet4f& b)
+{
+ return shuffle2<true>(a,b,eigen_neon_shuffle_mask(0, 0, 1, 1));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_unpackhi(const Packet4f& a, const Packet4f& b)
+{
+ return shuffle2<true>(a,b,eigen_neon_shuffle_mask(2, 2, 3, 3));
+}
+#define vec4f_duplane(a, p) \
+ vdupq_lane_f32(vget_low_f32(a), p)
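
// Usage sketch for the helpers above, assuming the GCC/Clang
// brace-initializable float32x4_t. The mask packs four 2-bit source-lane
// indices, so vec4f_swizzle1(a, 2,3,0,1) reorders a's lanes, and vec4f_movelh
// mirrors SSE's _mm_movelh_ps by concatenating the low halves of a and b.
static inline void shuffle_demo()
{
  const Packet4f a = {0.f, 1.f, 2.f, 3.f};
  const Packet4f b = {4.f, 5.f, 6.f, 7.f};
  Packet4f r1 = vec4f_swizzle1(a, 2, 3, 0, 1); // {2, 3, 0, 1}
  Packet4f r2 = vec4f_movelh(a, b);            // {0, 1, 4, 5}
  (void)r1; (void)r2;
}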
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
@@ -60,88 +155,823 @@ typedef uint32x4_t Packet4ui;
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
-#elif EIGEN_ARCH_ARM32
+#elif EIGEN_ARCH_ARM
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
#endif
-template<> struct packet_traits<float> : default_packet_traits
+template <>
+struct packet_traits<float> : default_packet_traits
{
typedef Packet4f type;
- typedef Packet4f half; // Packet2f intrinsics not implemented yet
- enum {
+ typedef Packet2f half;
+ enum
+ {
Vectorizable = 1,
AlignedOnScalar = 1,
size = 4,
- HasHalfPacket=0, // Packet2f intrinsics not implemented yet
-
- HasDiv = 1,
- // FIXME check the Has*
- HasSin = 0,
- HasCos = 0,
- HasLog = 0,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+
+ HasDiv = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
HasExp = 1,
- HasSqrt = 0
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasBessel = 0, // Issues with accuracy.
+ HasNdtri = 0
};
};
-template<> struct packet_traits<int32_t> : default_packet_traits
+
+template <>
+struct packet_traits<int8_t> : default_packet_traits
+{
+ typedef Packet16c type;
+ typedef Packet8c half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 16,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasAbsDiff = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0
+ };
+};
+
+template <>
+struct packet_traits<uint8_t> : default_packet_traits
+{
+ typedef Packet16uc type;
+ typedef Packet8uc half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 16,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 0,
+ HasAbs = 1,
+ HasAbsDiff = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+
+ HasSqrt = 1
+ };
+};
+
+template <>
+struct packet_traits<int16_t> : default_packet_traits
+{
+ typedef Packet8s type;
+ typedef Packet4s half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasAbsDiff = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0
+ };
+};
+
+template <>
+struct packet_traits<uint16_t> : default_packet_traits
+{
+ typedef Packet8us type;
+ typedef Packet4us half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 0,
+ HasAbs = 0,
+ HasAbsDiff = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+ HasSqrt = 1
+ };
+};
+
+template <>
+struct packet_traits<int32_t> : default_packet_traits
{
typedef Packet4i type;
- typedef Packet4i half; // Packet2i intrinsics not implemented yet
- enum {
+ typedef Packet2i half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0
+ };
+};
+
+template <>
+struct packet_traits<uint32_t> : default_packet_traits
+{
+ typedef Packet4ui type;
+ typedef Packet2ui half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 0,
+ HasAbs = 0,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+
+ HasSqrt = 1
+ };
+};
+
+template <>
+struct packet_traits<int64_t> : default_packet_traits
+{
+ typedef Packet2l type;
+ typedef Packet2l half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 0,
+
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0
+ };
+};
+
+template <>
+struct packet_traits<uint64_t> : default_packet_traits
+{
+ typedef Packet2ul type;
+ typedef Packet2ul half;
+ enum
+ {
Vectorizable = 1,
AlignedOnScalar = 1,
- size=4,
- HasHalfPacket=0 // Packet2i intrinsics not implemented yet
- // FIXME check the Has*
+ size = 2,
+ HasHalfPacket = 0,
+
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 0,
+ HasAbs = 0,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0
};
};
-#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
-// workaround gcc 4.2, 4.3 and 4.4 compilatin issue
+#if EIGEN_GNUC_AT_MOST(4, 4) && !EIGEN_COMP_LLVM
+// workaround gcc 4.2, 4.3 and 4.4 compilation issue
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
-EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
-EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
+EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
+EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
#endif
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet4i> { typedef int32_t type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet2f>
+{
+ typedef float type;
+ typedef Packet2f half;
+ typedef Packet2i integer_packet;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet4f>
+{
+ typedef float type;
+ typedef Packet2f half;
+ typedef Packet4i integer_packet;
+ enum
+ {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet4c>
+{
+ typedef int8_t type;
+ typedef Packet4c half;
+ enum
+ {
+ size = 4,
+ alignment = Unaligned,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet8c>
+{
+ typedef int8_t type;
+ typedef Packet4c half;
+ enum
+ {
+ size = 8,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet16c>
+{
+ typedef int8_t type;
+ typedef Packet8c half;
+ enum
+ {
+ size = 16,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet4uc>
+{
+ typedef uint8_t type;
+ typedef Packet4uc half;
+ enum
+ {
+ size = 4,
+ alignment = Unaligned,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet8uc>
+{
+ typedef uint8_t type;
+ typedef Packet4uc half;
+ enum
+ {
+ size = 8,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet16uc>
+{
+ typedef uint8_t type;
+ typedef Packet8uc half;
+ enum
+ {
+ size = 16,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+    masked_store_available = false
+  };
+};
+template<> struct unpacket_traits<Packet4s>
+{
+ typedef int16_t type;
+ typedef Packet4s half;
+ enum
+ {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet8s>
+{
+ typedef int16_t type;
+ typedef Packet4s half;
+ enum
+ {
+ size = 8,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet4us>
+{
+ typedef uint16_t type;
+ typedef Packet4us half;
+ enum
+ {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet8us>
+{
+ typedef uint16_t type;
+ typedef Packet4us half;
+ enum
+ {
+ size = 8,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet2i>
+{
+ typedef int32_t type;
+ typedef Packet2i half;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet4i>
+{
+ typedef int32_t type;
+ typedef Packet2i half;
+ enum
+ {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet2ui>
+{
+ typedef uint32_t type;
+ typedef Packet2ui half;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet4ui>
+{
+ typedef uint32_t type;
+ typedef Packet2ui half;
+ enum
+ {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet2l>
+{
+ typedef int64_t type;
+ typedef Packet2l half;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+template<> struct unpacket_traits<Packet2ul>
+{
+ typedef uint64_t type;
+ typedef Packet2ul half;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet2f pset1<Packet2f>(const float& from) { return vdup_n_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4c pset1<Packet4c>(const int8_t& from)
+{ return vget_lane_s32(vreinterpret_s32_s8(vdup_n_s8(from)), 0); }
+template<> EIGEN_STRONG_INLINE Packet8c pset1<Packet8c>(const int8_t& from) { return vdup_n_s8(from); }
+template<> EIGEN_STRONG_INLINE Packet16c pset1<Packet16c>(const int8_t& from) { return vdupq_n_s8(from); }
+template<> EIGEN_STRONG_INLINE Packet4uc pset1<Packet4uc>(const uint8_t& from)
+{ return vget_lane_u32(vreinterpret_u32_u8(vdup_n_u8(from)), 0); }
+template<> EIGEN_STRONG_INLINE Packet8uc pset1<Packet8uc>(const uint8_t& from) { return vdup_n_u8(from); }
+template<> EIGEN_STRONG_INLINE Packet16uc pset1<Packet16uc>(const uint8_t& from) { return vdupq_n_u8(from); }
+template<> EIGEN_STRONG_INLINE Packet4s pset1<Packet4s>(const int16_t& from) { return vdup_n_s16(from); }
+template<> EIGEN_STRONG_INLINE Packet8s pset1<Packet8s>(const int16_t& from) { return vdupq_n_s16(from); }
+template<> EIGEN_STRONG_INLINE Packet4us pset1<Packet4us>(const uint16_t& from) { return vdup_n_u16(from); }
+template<> EIGEN_STRONG_INLINE Packet8us pset1<Packet8us>(const uint16_t& from) { return vdupq_n_u16(from); }
+template<> EIGEN_STRONG_INLINE Packet2i pset1<Packet2i>(const int32_t& from) { return vdup_n_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet2ui pset1<Packet2ui>(const uint32_t& from) { return vdup_n_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet4ui pset1<Packet4ui>(const uint32_t& from) { return vdupq_n_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet2l pset1<Packet2l>(const int64_t& from) { return vdupq_n_s64(from); }
+template<> EIGEN_STRONG_INLINE Packet2ul pset1<Packet2ul>(const uint64_t& from) { return vdupq_n_u64(from); }
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet2f pset1frombits<Packet2f>(unsigned int from)
+{ return vreinterpret_f32_u32(vdup_n_u32(from)); }
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from)
+{ return vreinterpretq_f32_u32(vdupq_n_u32(from)); }
+template<> EIGEN_STRONG_INLINE Packet2f plset<Packet2f>(const float& a)
+{
+ const float c[] = {0.0f,1.0f};
+ return vadd_f32(pset1<Packet2f>(a), vld1_f32(c));
+}
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
- const float f[] = {0, 1, 2, 3};
- Packet4f countdown = vld1q_f32(f);
- return vaddq_f32(pset1<Packet4f>(a), countdown);
+ const float c[] = {0.0f,1.0f,2.0f,3.0f};
+ return vaddq_f32(pset1<Packet4f>(a), vld1q_f32(c));
+}
+template<> EIGEN_STRONG_INLINE Packet4c plset<Packet4c>(const int8_t& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vadd_s8(vreinterpret_s8_u32(vdup_n_u32(0x03020100)), vdup_n_s8(a))), 0); }
+template<> EIGEN_STRONG_INLINE Packet8c plset<Packet8c>(const int8_t& a)
+{
+ const int8_t c[] = {0,1,2,3,4,5,6,7};
+ return vadd_s8(pset1<Packet8c>(a), vld1_s8(c));
+}
+template<> EIGEN_STRONG_INLINE Packet16c plset<Packet16c>(const int8_t& a)
+{
+ const int8_t c[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+ return vaddq_s8(pset1<Packet16c>(a), vld1q_s8(c));
+}
+template<> EIGEN_STRONG_INLINE Packet4uc plset<Packet4uc>(const uint8_t& a)
+{ return vget_lane_u32(vreinterpret_u32_u8(vadd_u8(vreinterpret_u8_u32(vdup_n_u32(0x03020100)), vdup_n_u8(a))), 0); }
+template<> EIGEN_STRONG_INLINE Packet8uc plset<Packet8uc>(const uint8_t& a)
+{
+ const uint8_t c[] = {0,1,2,3,4,5,6,7};
+ return vadd_u8(pset1<Packet8uc>(a), vld1_u8(c));
+}
+template<> EIGEN_STRONG_INLINE Packet16uc plset<Packet16uc>(const uint8_t& a)
+{
+ const uint8_t c[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+ return vaddq_u8(pset1<Packet16uc>(a), vld1q_u8(c));
+}
+template<> EIGEN_STRONG_INLINE Packet4s plset<Packet4s>(const int16_t& a)
+{
+ const int16_t c[] = {0,1,2,3};
+ return vadd_s16(pset1<Packet4s>(a), vld1_s16(c));
+}
+template<> EIGEN_STRONG_INLINE Packet4us plset<Packet4us>(const uint16_t& a)
+{
+ const uint16_t c[] = {0,1,2,3};
+ return vadd_u16(pset1<Packet4us>(a), vld1_u16(c));
+}
+template<> EIGEN_STRONG_INLINE Packet8s plset<Packet8s>(const int16_t& a)
+{
+ const int16_t c[] = {0,1,2,3,4,5,6,7};
+ return vaddq_s16(pset1<Packet8s>(a), vld1q_s16(c));
+}
+template<> EIGEN_STRONG_INLINE Packet8us plset<Packet8us>(const uint16_t& a)
+{
+ const uint16_t c[] = {0,1,2,3,4,5,6,7};
+ return vaddq_u16(pset1<Packet8us>(a), vld1q_u16(c));
+}
+template<> EIGEN_STRONG_INLINE Packet2i plset<Packet2i>(const int32_t& a)
+{
+ const int32_t c[] = {0,1};
+ return vadd_s32(pset1<Packet2i>(a), vld1_s32(c));
}
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
{
- const int32_t i[] = {0, 1, 2, 3};
- Packet4i countdown = vld1q_s32(i);
- return vaddq_s32(pset1<Packet4i>(a), countdown);
+ const int32_t c[] = {0,1,2,3};
+ return vaddq_s32(pset1<Packet4i>(a), vld1q_s32(c));
+}
+template<> EIGEN_STRONG_INLINE Packet2ui plset<Packet2ui>(const uint32_t& a)
+{
+ const uint32_t c[] = {0,1};
+ return vadd_u32(pset1<Packet2ui>(a), vld1_u32(c));
+}
+template<> EIGEN_STRONG_INLINE Packet4ui plset<Packet4ui>(const uint32_t& a)
+{
+ const uint32_t c[] = {0,1,2,3};
+ return vaddq_u32(pset1<Packet4ui>(a), vld1q_u32(c));
+}
+template<> EIGEN_STRONG_INLINE Packet2l plset<Packet2l>(const int64_t& a)
+{
+ const int64_t c[] = {0,1};
+ return vaddq_s64(pset1<Packet2l>(a), vld1q_s64(c));
+}
+template<> EIGEN_STRONG_INLINE Packet2ul plset<Packet2ul>(const uint64_t& a)
+{
+ const uint64_t c[] = {0,1};
+ return vaddq_u64(pset1<Packet2ul>(a), vld1q_u64(c));
}
+template<> EIGEN_STRONG_INLINE Packet2f padd<Packet2f>(const Packet2f& a, const Packet2f& b) { return vadd_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4c padd<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vadd_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c padd<Packet8c>(const Packet8c& a, const Packet8c& b) { return vadd_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c padd<Packet16c>(const Packet16c& a, const Packet16c& b) { return vaddq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc padd<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vadd_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc padd<Packet8uc>(const Packet8uc& a, const Packet8uc& b) { return vadd_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc padd<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vaddq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s padd<Packet4s>(const Packet4s& a, const Packet4s& b) { return vadd_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s padd<Packet8s>(const Packet8s& a, const Packet8s& b) { return vaddq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us padd<Packet4us>(const Packet4us& a, const Packet4us& b) { return vadd_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us padd<Packet8us>(const Packet8us& a, const Packet8us& b) { return vaddq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i padd<Packet2i>(const Packet2i& a, const Packet2i& b) { return vadd_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui padd<Packet2ui>(const Packet2ui& a, const Packet2ui& b) { return vadd_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui padd<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return vaddq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l padd<Packet2l>(const Packet2l& a, const Packet2l& b) { return vaddq_s64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ul padd<Packet2ul>(const Packet2ul& a, const Packet2ul& b) { return vaddq_u64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2f psub<Packet2f>(const Packet2f& a, const Packet2f& b) { return vsub_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4c psub<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vsub_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c psub<Packet8c>(const Packet8c& a, const Packet8c& b) { return vsub_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c psub<Packet16c>(const Packet16c& a, const Packet16c& b) { return vsubq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc psub<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vsub_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc psub<Packet8uc>(const Packet8uc& a, const Packet8uc& b) { return vsub_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc psub<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vsubq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s psub<Packet4s>(const Packet4s& a, const Packet4s& b) { return vsub_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s psub<Packet8s>(const Packet8s& a, const Packet8s& b) { return vsubq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us psub<Packet4us>(const Packet4us& a, const Packet4us& b) { return vsub_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us psub<Packet8us>(const Packet8us& a, const Packet8us& b) { return vsubq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i psub<Packet2i>(const Packet2i& a, const Packet2i& b) { return vsub_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui psub<Packet2ui>(const Packet2ui& a, const Packet2ui& b) { return vsub_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui psub<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return vsubq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l psub<Packet2l>(const Packet2l& a, const Packet2l& b) { return vsubq_s64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ul psub<Packet2ul>(const Packet2ul& a, const Packet2ul& b) { return vsubq_u64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2f pxor<Packet2f>(const Packet2f& a, const Packet2f& b);
+template<> EIGEN_STRONG_INLINE Packet2f paddsub<Packet2f>(const Packet2f& a, const Packet2f& b) {
+ Packet2f mask = {numext::bit_cast<float>(0x80000000u), 0.0f};
+ return padd(a, pxor(mask, b));
+}
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b);
+template<> EIGEN_STRONG_INLINE Packet4f paddsub<Packet4f>(const Packet4f& a, const Packet4f& b) {
+ Packet4f mask = {numext::bit_cast<float>(0x80000000u), 0.0f, numext::bit_cast<float>(0x80000000u), 0.0f};
+ return padd(a, pxor(mask, b));
+}
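
// Scalar reference for paddsub (a sketch): even lanes subtract, odd lanes add.
// XORing b's even lanes with the sign bit 0x80000000 negates them, so the
// single padd above implements the alternating pattern without a select.
static inline void paddsub_reference(const float* a, const float* b, float* r, int n)
{
  for (int i = 0; i < n; ++i)
    r[i] = (i % 2 == 0) ? a[i] - b[i] : a[i] + b[i];
}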
+
+template<> EIGEN_STRONG_INLINE Packet2f pnegate(const Packet2f& a) { return vneg_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4c pnegate(const Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vneg_s8(vreinterpret_s8_s32(vdup_n_s32(a)))), 0); }
+template<> EIGEN_STRONG_INLINE Packet8c pnegate(const Packet8c& a) { return vneg_s8(a); }
+template<> EIGEN_STRONG_INLINE Packet16c pnegate(const Packet16c& a) { return vnegq_s8(a); }
+template<> EIGEN_STRONG_INLINE Packet4s pnegate(const Packet4s& a) { return vneg_s16(a); }
+template<> EIGEN_STRONG_INLINE Packet8s pnegate(const Packet8s& a) { return vnegq_s16(a); }
+template<> EIGEN_STRONG_INLINE Packet2i pnegate(const Packet2i& a) { return vneg_s32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet2l pnegate(const Packet2l& a) {
+#if EIGEN_ARCH_ARM64
+ return vnegq_s64(a);
+#else
+ return vcombine_s64(
+ vdup_n_s64(-vgetq_lane_s64(a, 0)),
+ vdup_n_s64(-vgetq_lane_s64(a, 1)));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2f pconj(const Packet2f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4c pconj(const Packet4c& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8c pconj(const Packet8c& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet16c pconj(const Packet16c& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4uc pconj(const Packet4uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8uc pconj(const Packet8uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet16uc pconj(const Packet16uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4s pconj(const Packet4s& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8s pconj(const Packet8s& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4us pconj(const Packet4us& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8us pconj(const Packet8us& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2i pconj(const Packet2i& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2ui pconj(const Packet2ui& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4ui pconj(const Packet4ui& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2l pconj(const Packet2l& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2ul pconj(const Packet2ul& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2f pmul<Packet2f>(const Packet2f& a, const Packet2f& b) { return vmul_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4c pmul<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vmul_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pmul<Packet8c>(const Packet8c& a, const Packet8c& b) { return vmul_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmul<Packet16c>(const Packet16c& a, const Packet16c& b) { return vmulq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pmul<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vmul_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pmul<Packet8uc>(const Packet8uc& a, const Packet8uc& b) { return vmul_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmul<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vmulq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pmul<Packet4s>(const Packet4s& a, const Packet4s& b) { return vmul_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmul<Packet8s>(const Packet8s& a, const Packet8s& b) { return vmulq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pmul<Packet4us>(const Packet4us& a, const Packet4us& b) { return vmul_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmul<Packet8us>(const Packet8us& a, const Packet8us& b) { return vmulq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pmul<Packet2i>(const Packet2i& a, const Packet2i& b) { return vmul_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pmul<Packet2ui>(const Packet2ui& a, const Packet2ui& b) { return vmul_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pmul<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return vmulq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pmul<Packet2l>(const Packet2l& a, const Packet2l& b) {
+ return vcombine_s64(
+ vdup_n_s64(vgetq_lane_s64(a, 0)*vgetq_lane_s64(b, 0)),
+ vdup_n_s64(vgetq_lane_s64(a, 1)*vgetq_lane_s64(b, 1)));
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pmul<Packet2ul>(const Packet2ul& a, const Packet2ul& b) {
+ return vcombine_u64(
+ vdup_n_u64(vgetq_lane_u64(a, 0)*vgetq_lane_u64(b, 0)),
+ vdup_n_u64(vgetq_lane_u64(a, 1)*vgetq_lane_u64(b, 1)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2f pdiv<Packet2f>(const Packet2f& a, const Packet2f& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vdiv_f32(a,b);
+#else
+ Packet2f inv, restep, div;
+  // NEON does not offer a divide instruction, so we have to use a reciprocal approximation.
+  // However, NEON, in contrast to other SIMD engines (AltiVec/SSE), offers
+  // both a reciprocal estimate AND a reciprocal step, which saves a few instructions:
+  // vrecpe_f32() returns an estimate of 1/b, which we then refine with
+  // Newton-Raphson using vrecps_f32().
+ inv = vrecpe_f32(b);
+
+  // vrecps_f32() returns a correction factor by which we multiply inv to get a better
+  // approximation of 1/b.
+ restep = vrecps_f32(b, inv);
+ inv = vmul_f32(restep, inv);
+
+ // Finally, multiply a by 1/b and get the wanted result of the division.
+ div = vmul_f32(a, inv);
+
+ return div;
+#endif
+}
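
// Scalar view of the refinement above (a sketch): vrecpe_f32 yields a coarse
// estimate x0 of 1/b, and vrecps_f32(b, x) returns the Newton-Raphson
// correction (2 - b*x), so each step computes x <- x * (2 - b*x) and roughly
// doubles the number of accurate bits.
static inline float reciprocal_refine(float b, float x0)
{
  float x = x0;
  x = x * (2.0f - b * x); // one Newton-Raphson step, as in the code above
  return x;               // a/b is then computed as a * x
}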
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
#if EIGEN_ARCH_ARM64
@@ -168,357 +998,2629 @@ template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const
#endif
}
+template<> EIGEN_STRONG_INLINE Packet4c pdiv<Packet4c>(const Packet4c& /*a*/, const Packet4c& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet4c>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pdiv<Packet8c>(const Packet8c& /*a*/, const Packet8c& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet8c>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet16c pdiv<Packet16c>(const Packet16c& /*a*/, const Packet16c& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet16c>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet4uc pdiv<Packet4uc>(const Packet4uc& /*a*/, const Packet4uc& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet4uc>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pdiv<Packet8uc>(const Packet8uc& /*a*/, const Packet8uc& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet8uc>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet16uc pdiv<Packet16uc>(const Packet16uc& /*a*/, const Packet16uc& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet16uc>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet4s pdiv<Packet4s>(const Packet4s& /*a*/, const Packet4s& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet4s>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet8s pdiv<Packet8s>(const Packet8s& /*a*/, const Packet8s& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet8s>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet4us pdiv<Packet4us>(const Packet4us& /*a*/, const Packet4us& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet4us>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet8us pdiv<Packet8us>(const Packet8us& /*a*/, const Packet8us& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet8us>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet2i pdiv<Packet2i>(const Packet2i& /*a*/, const Packet2i& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet2i>(0);
+}
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
-{ eigen_assert(false && "packet integer division are not supported by NEON");
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
return pset1<Packet4i>(0);
}
+template<> EIGEN_STRONG_INLINE Packet2ui pdiv<Packet2ui>(const Packet2ui& /*a*/, const Packet2ui& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet2ui>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet4ui pdiv<Packet4ui>(const Packet4ui& /*a*/, const Packet4ui& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet4ui>(0);
+}
+template<> EIGEN_STRONG_INLINE Packet2l pdiv<Packet2l>(const Packet2l& /*a*/, const Packet2l& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet2l>(0LL);
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pdiv<Packet2ul>(const Packet2ul& /*a*/, const Packet2ul& /*b*/)
+{
+  eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet2ul>(0ULL);
+}
-// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when it's not available,
-// then implements a slow software scalar fallback calling fmaf()!
-// Filed LLVM bug:
-// https://llvm.org/bugs/show_bug.cgi?id=27216
-#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
-// See bug 936.
-// FMA is available on VFPv4 i.e. when compiling with -mfpu=neon-vfpv4.
-// FMA is a true fused multiply-add i.e. only 1 rounding at the end, no intermediate rounding.
-// MLA is not fused i.e. does 2 roundings.
-// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
-// MLA: 10 GFlop/s ; FMA: 12 GFlops/s.
-template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
-#else
-template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
-#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
- // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,
- // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
- // -march=armv7-a, that is a very common case.
- // See e.g. this thread:
- // http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html
- // Filed LLVM bug:
- // https://llvm.org/bugs/show_bug.cgi?id=27219
- Packet4f r = c;
- asm volatile(
- "vmla.f32 %q[r], %q[a], %q[b]"
- : [r] "+w" (r)
- : [a] "w" (a),
- [b] "w" (b)
- : );
- return r;
+
+#ifdef __ARM_FEATURE_FMA
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+{ return vfmaq_f32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2f pmadd(const Packet2f& a, const Packet2f& b, const Packet2f& c)
+{ return vfma_f32(c,a,b); }
#else
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+{
return vmlaq_f32(c,a,b);
-#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2f pmadd(const Packet2f& a, const Packet2f& b, const Packet2f& c)
+{
+ return vmla_f32(c,a,b);
}
#endif
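
// A sketch of the distinction the removed comment described: FMA rounds once
// at the end, while a separate multiply and add can round twice, so results
// may differ in the last bit (note compilers may still contract a*b + c into
// an FMA depending on -ffp-contract).
#include <cmath>

static inline float madd_fused(float a, float b, float c)   { return std::fmaf(a, b, c); } // one rounding
static inline float madd_unfused(float a, float b, float c) { return a * b + c; }          // up to two roundings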
// No FMA instruction for int, so use MLA unconditionally.
-template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4c pmadd(const Packet4c& a, const Packet4c& b, const Packet4c& c)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vmla_s8(
+ vreinterpret_s8_s32(vdup_n_s32(c)),
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pmadd(const Packet8c& a, const Packet8c& b, const Packet8c& c)
+{ return vmla_s8(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmadd(const Packet16c& a, const Packet16c& b, const Packet16c& c)
+{ return vmlaq_s8(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pmadd(const Packet4uc& a, const Packet4uc& b, const Packet4uc& c)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vmla_u8(
+ vreinterpret_u8_u32(vdup_n_u32(c)),
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pmadd(const Packet8uc& a, const Packet8uc& b, const Packet8uc& c)
+{ return vmla_u8(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmadd(const Packet16uc& a, const Packet16uc& b, const Packet16uc& c)
+{ return vmlaq_u8(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pmadd(const Packet4s& a, const Packet4s& b, const Packet4s& c)
+{ return vmla_s16(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmadd(const Packet8s& a, const Packet8s& b, const Packet8s& c)
+{ return vmlaq_s16(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pmadd(const Packet4us& a, const Packet4us& b, const Packet4us& c)
+{ return vmla_u16(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmadd(const Packet8us& a, const Packet8us& b, const Packet8us& c)
+{ return vmlaq_u16(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pmadd(const Packet2i& a, const Packet2i& b, const Packet2i& c)
+{ return vmla_s32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c)
+{ return vmlaq_s32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pmadd(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c)
+{ return vmla_u32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pmadd(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c)
+{ return vmlaq_u32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2f pabsdiff<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vabd_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pabsdiff<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vabdq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4c pabsdiff<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vabd_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pabsdiff<Packet8c>(const Packet8c& a, const Packet8c& b)
+{ return vabd_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pabsdiff<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return vabdq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pabsdiff<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vabd_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pabsdiff<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vabd_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pabsdiff<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vabdq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pabsdiff<Packet4s>(const Packet4s& a, const Packet4s& b)
+{ return vabd_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pabsdiff<Packet8s>(const Packet8s& a, const Packet8s& b)
+{ return vabdq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pabsdiff<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vabd_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pabsdiff<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vabdq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pabsdiff<Packet2i>(const Packet2i& a, const Packet2i& b)
+{ return vabd_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pabsdiff<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vabdq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pabsdiff<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vabd_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pabsdiff<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vabdq_u32(a,b); }
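+// pabsdiff is the lane-wise absolute difference, mapped onto vabd/vabdq.
+// Scalar sketch of one lane (illustration only):
+//   r[i] = a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];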
+
+template<> EIGEN_STRONG_INLINE Packet2f pmin<Packet2f>(const Packet2f& a, const Packet2f& b) { return vmin_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
+
+#ifdef __ARM_FEATURE_NUMERIC_MAXMIN
+// Numeric max and min (vminnm) are only available when __ARM_FEATURE_NUMERIC_MAXMIN is defined (which is only the case on Armv8 systems).
+template<> EIGEN_STRONG_INLINE Packet4f pmin<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) { return vminnmq_f32(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2f pmin<PropagateNumbers, Packet2f>(const Packet2f& a, const Packet2f& b) { return vminnm_f32(a, b); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) { return pmin<Packet4f>(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet2f pmin<PropagateNaN, Packet2f>(const Packet2f& a, const Packet2f& b) { return pmin<Packet2f>(a, b); }
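+// NaN handling: plain vmin/vmax return NaN whenever either input is NaN, so
+// the PropagateNaN variants above can simply forward to the default pmin.
+// vminnm (guarded by __ARM_FEATURE_NUMERIC_MAXMIN) implements IEEE-754
+// minNum instead.  Scalar sketch (illustration only):
+//   minnum(a, b) = isnan(a) ? b : isnan(b) ? a : min(a, b);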
+
+template<> EIGEN_STRONG_INLINE Packet4c pmin<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vmin_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pmin<Packet8c>(const Packet8c& a, const Packet8c& b) { return vmin_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmin<Packet16c>(const Packet16c& a, const Packet16c& b) { return vminq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pmin<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vmin_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pmin<Packet8uc>(const Packet8uc& a, const Packet8uc& b) { return vmin_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmin<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vminq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pmin<Packet4s>(const Packet4s& a, const Packet4s& b) { return vmin_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmin<Packet8s>(const Packet8s& a, const Packet8s& b) { return vminq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pmin<Packet4us>(const Packet4us& a, const Packet4us& b) { return vmin_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmin<Packet8us>(const Packet8us& a, const Packet8us& b) { return vminq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pmin<Packet2i>(const Packet2i& a, const Packet2i& b) { return vmin_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pmin<Packet2ui>(const Packet2ui& a, const Packet2ui& b) { return vmin_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pmin<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return vminq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pmin<Packet2l>(const Packet2l& a, const Packet2l& b) {
+ return vcombine_s64(
+ vdup_n_s64((std::min)(vgetq_lane_s64(a, 0), vgetq_lane_s64(b, 0))),
+ vdup_n_s64((std::min)(vgetq_lane_s64(a, 1), vgetq_lane_s64(b, 1))));
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pmin<Packet2ul>(const Packet2ul& a, const Packet2ul& b) {
+ return vcombine_u64(
+ vdup_n_u64((std::min)(vgetq_lane_u64(a, 0), vgetq_lane_u64(b, 0))),
+ vdup_n_u64((std::min)(vgetq_lane_u64(a, 1), vgetq_lane_u64(b, 1))));
+}
+template<> EIGEN_STRONG_INLINE Packet2f pmax<Packet2f>(const Packet2f& a, const Packet2f& b) { return vmax_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
+
+#ifdef __ARM_FEATURE_NUMERIC_MAXMIN
+// Numeric max and min (vmaxnm) are only available when __ARM_FEATURE_NUMERIC_MAXMIN is defined (which is only the case on Armv8 systems).
+template<> EIGEN_STRONG_INLINE Packet4f pmax<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxnmq_f32(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2f pmax<PropagateNumbers, Packet2f>(const Packet2f& a, const Packet2f& b) { return vmaxnm_f32(a, b); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) { return pmax<Packet4f>(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet2f pmax<PropagateNaN, Packet2f>(const Packet2f& a, const Packet2f& b) { return pmax<Packet2f>(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4c pmax<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vmax_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pmax<Packet8c>(const Packet8c& a, const Packet8c& b) { return vmax_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmax<Packet16c>(const Packet16c& a, const Packet16c& b) { return vmaxq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pmax<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vmax_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pmax<Packet8uc>(const Packet8uc& a, const Packet8uc& b) { return vmax_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmax<Packet16uc>(const Packet16uc& a, const Packet16uc& b) { return vmaxq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pmax<Packet4s>(const Packet4s& a, const Packet4s& b) { return vmax_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmax<Packet8s>(const Packet8s& a, const Packet8s& b) { return vmaxq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pmax<Packet4us>(const Packet4us& a, const Packet4us& b) { return vmax_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmax<Packet8us>(const Packet8us& a, const Packet8us& b) { return vmaxq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pmax<Packet2i>(const Packet2i& a, const Packet2i& b) { return vmax_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pmax<Packet2ui>(const Packet2ui& a, const Packet2ui& b) { return vmax_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pmax<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return vmaxq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pmax<Packet2l>(const Packet2l& a, const Packet2l& b) {
+ return vcombine_s64(
+ vdup_n_s64((std::max)(vgetq_lane_s64(a, 0), vgetq_lane_s64(b, 0))),
+ vdup_n_s64((std::max)(vgetq_lane_s64(a, 1), vgetq_lane_s64(b, 1))));
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pmax<Packet2ul>(const Packet2ul& a, const Packet2ul& b) {
+ return vcombine_u64(
+ vdup_n_u64((std::max)(vgetq_lane_u64(a, 0), vgetq_lane_u64(b, 0))),
+ vdup_n_u64((std::max)(vgetq_lane_u64(a, 1), vgetq_lane_u64(b, 1))));
+}
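+// NEON offers no 64-bit vmin/vmax, so the Packet2l/Packet2ul variants above
+// extract both lanes, compare them as scalars, and rebuild the vector with
+// vdup_n_*64 + vcombine.  (std::min)/(std::max) are parenthesised to keep
+// them safe from min/max macros (e.g. from <windows.h>).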
-// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
-template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+template<> EIGEN_STRONG_INLINE Packet2f pcmp_le<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vcle_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vcleq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4c pcmp_le<Packet4c>(const Packet4c& a, const Packet4c& b)
{
- return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+ return vget_lane_s32(vreinterpret_s32_u8(vcle_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pcmp_le<Packet8c>(const Packet8c& a, const Packet8c& b)
+{ return vreinterpret_s8_u8(vcle_s8(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16c pcmp_le<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return vreinterpretq_s8_u8(vcleq_s8(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4uc pcmp_le<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vcle_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pcmp_le<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vcle_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pcmp_le<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vcleq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pcmp_le<Packet4s>(const Packet4s& a, const Packet4s& b)
+{ return vreinterpret_s16_u16(vcle_s16(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8s pcmp_le<Packet8s>(const Packet8s& a, const Packet8s& b)
+{ return vreinterpretq_s16_u16(vcleq_s16(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4us pcmp_le<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vcle_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pcmp_le<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vcleq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pcmp_le<Packet2i>(const Packet2i& a, const Packet2i& b)
+{ return vreinterpret_s32_u32(vcle_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_le<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vreinterpretq_s32_u32(vcleq_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet2ui pcmp_le<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vcle_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pcmp_le<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vcleq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pcmp_le<Packet2l>(const Packet2l& a, const Packet2l& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vreinterpretq_s64_u64(vcleq_s64(a,b));
+#else
+ return vcombine_s64(
+ vdup_n_s64(vgetq_lane_s64(a, 0) <= vgetq_lane_s64(b, 0) ? numext::int64_t(-1) : 0),
+ vdup_n_s64(vgetq_lane_s64(a, 1) <= vgetq_lane_s64(b, 1) ? numext::int64_t(-1) : 0));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pcmp_le<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vcleq_u64(a,b);
+#else
+ return vcombine_u64(
+ vdup_n_u64(vgetq_lane_u64(a, 0) <= vgetq_lane_u64(b, 0) ? numext::uint64_t(-1) : 0),
+ vdup_n_u64(vgetq_lane_u64(a, 1) <= vgetq_lane_u64(b, 1) ? numext::uint64_t(-1) : 0));
+#endif
}
-template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+template<> EIGEN_STRONG_INLINE Packet2f pcmp_lt<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vclt_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vcltq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4c pcmp_lt<Packet4c>(const Packet4c& a, const Packet4c& b)
{
- return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+ return vget_lane_s32(vreinterpret_s32_u8(vclt_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pcmp_lt<Packet8c>(const Packet8c& a, const Packet8c& b)
+{ return vreinterpret_s8_u8(vclt_s8(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16c pcmp_lt<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return vreinterpretq_s8_u8(vcltq_s8(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4uc pcmp_lt<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vclt_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc pcmp_lt<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vclt_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pcmp_lt<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vcltq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pcmp_lt<Packet4s>(const Packet4s& a, const Packet4s& b)
+{ return vreinterpret_s16_u16(vclt_s16(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8s pcmp_lt<Packet8s>(const Packet8s& a, const Packet8s& b)
+{ return vreinterpretq_s16_u16(vcltq_s16(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4us pcmp_lt<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vclt_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pcmp_lt<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vcltq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pcmp_lt<Packet2i>(const Packet2i& a, const Packet2i& b)
+{ return vreinterpret_s32_u32(vclt_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vreinterpretq_s32_u32(vcltq_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet2ui pcmp_lt<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vclt_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pcmp_lt<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vcltq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pcmp_lt<Packet2l>(const Packet2l& a, const Packet2l& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vreinterpretq_s64_u64(vcltq_s64(a,b));
+#else
+ return vcombine_s64(
+ vdup_n_s64(vgetq_lane_s64(a, 0) < vgetq_lane_s64(b, 0) ? numext::int64_t(-1) : 0),
+ vdup_n_s64(vgetq_lane_s64(a, 1) < vgetq_lane_s64(b, 1) ? numext::int64_t(-1) : 0));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pcmp_lt<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vcltq_u64(a,b);
+#else
+ return vcombine_u64(
+ vdup_n_u64(vgetq_lane_u64(a, 0) < vgetq_lane_u64(b, 0) ? numext::uint64_t(-1) : 0),
+ vdup_n_u64(vgetq_lane_u64(a, 1) < vgetq_lane_u64(b, 1) ? numext::uint64_t(-1) : 0));
+#endif
}
-template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+template<> EIGEN_STRONG_INLINE Packet2f pcmp_eq<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vceq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vceqq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4c pcmp_eq<Packet4c>(const Packet4c& a, const Packet4c& b)
+{
+ return vget_lane_s32(vreinterpret_s32_u8(vceq_s8(
+ vreinterpret_s8_s32(vdup_n_s32(a)),
+ vreinterpret_s8_s32(vdup_n_s32(b)))), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c pcmp_eq<Packet8c>(const Packet8c& a, const Packet8c& b)
+{ return vreinterpret_s8_u8(vceq_s8(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet16c pcmp_eq<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return vreinterpretq_s8_u8(vceqq_s8(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4uc pcmp_eq<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
{
- return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+ return vget_lane_u32(vreinterpret_u32_u8(vceq_u8(
+ vreinterpret_u8_u32(vdup_n_u32(a)),
+ vreinterpret_u8_u32(vdup_n_u32(b)))), 0);
}
+template<> EIGEN_STRONG_INLINE Packet8uc pcmp_eq<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vceq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pcmp_eq<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vceqq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pcmp_eq<Packet4s>(const Packet4s& a, const Packet4s& b)
+{ return vreinterpret_s16_u16(vceq_s16(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet8s pcmp_eq<Packet8s>(const Packet8s& a, const Packet8s& b)
+{ return vreinterpretq_s16_u16(vceqq_s16(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4us pcmp_eq<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vceq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pcmp_eq<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vceqq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pcmp_eq<Packet2i>(const Packet2i& a, const Packet2i& b)
+{ return vreinterpret_s32_u32(vceq_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vreinterpretq_s32_u32(vceqq_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet2ui pcmp_eq<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vceq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pcmp_eq<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vceqq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pcmp_eq<Packet2l>(const Packet2l& a, const Packet2l& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vreinterpretq_s64_u64(vceqq_s64(a,b));
+#else
+ return vcombine_s64(
+ vdup_n_s64(vgetq_lane_s64(a, 0) == vgetq_lane_s64(b, 0) ? numext::int64_t(-1) : 0),
+ vdup_n_s64(vgetq_lane_s64(a, 1) == vgetq_lane_s64(b, 1) ? numext::int64_t(-1) : 0));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pcmp_eq<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{
+#if EIGEN_ARCH_ARM64
+ return vceqq_u64(a,b);
+#else
+ return vcombine_u64(
+ vdup_n_u64(vgetq_lane_u64(a, 0) == vgetq_lane_u64(b, 0) ? numext::uint64_t(-1) : 0),
+ vdup_n_u64(vgetq_lane_u64(a, 1) == vgetq_lane_u64(b, 1) ? numext::uint64_t(-1) : 0));
+#endif
+}
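+// Every pcmp_* kernel yields a per-lane bit mask: all ones where the
+// predicate holds, all zeros otherwise, reinterpreted back to the packet's
+// element type so it can be combined with pand/por/pandnot below.  On 32-bit
+// ARM there are no 64-bit compare instructions, hence the scalar fallbacks
+// that build each lane as (cond ? int64_t(-1) : 0), -1 being all bits set.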
+
+template<> EIGEN_STRONG_INLINE Packet2f pcmp_lt_or_nan<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vmvn_u32(vcge_f32(a,b))); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vmvnq_u32(vcgeq_f32(a,b))); }
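+// pcmp_lt_or_nan is NOT(a >= b): comparisons involving NaN are false, so
+// vmvn(vcge(a,b)) is true exactly when a < b or either input is NaN.
+// Scalar sketch (illustration only):
+//   r = (a < b) || isnan(a) || isnan(b);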
+
+// Logical operations are not supported for float, so we reinterpret the bits as integers, apply the integer operation, and cast the result back, all via NEON reinterpret intrinsics.
+template<> EIGEN_STRONG_INLINE Packet2f pand<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4c pand<Packet4c>(const Packet4c& a, const Packet4c& b)
+{ return a & b; }
+template<> EIGEN_STRONG_INLINE Packet8c pand<Packet8c>(const Packet8c& a, const Packet8c& b)
+{ return vand_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pand<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return vandq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pand<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{ return a & b; }
+template<> EIGEN_STRONG_INLINE Packet8uc pand<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vand_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pand<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vandq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pand<Packet4s>(const Packet4s& a, const Packet4s& b) { return vand_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pand<Packet8s>(const Packet8s& a, const Packet8s& b) { return vandq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pand<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vand_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pand<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vandq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pand<Packet2i>(const Packet2i& a, const Packet2i& b) { return vand_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pand<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vand_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pand<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vandq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pand<Packet2l>(const Packet2l& a, const Packet2l& b) { return vandq_s64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ul pand<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{ return vandq_u64(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet2f por<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vorr_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4c por<Packet4c>(const Packet4c& a, const Packet4c& b)
+{ return a | b; }
+template<> EIGEN_STRONG_INLINE Packet8c por<Packet8c>(const Packet8c& a, const Packet8c& b) { return vorr_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c por<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return vorrq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc por<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{ return a | b; }
+template<> EIGEN_STRONG_INLINE Packet8uc por<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vorr_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc por<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vorrq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s por<Packet4s>(const Packet4s& a, const Packet4s& b)
+{ return vorr_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s por<Packet8s>(const Packet8s& a, const Packet8s& b)
+{ return vorrq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us por<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vorr_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us por<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vorrq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i por<Packet2i>(const Packet2i& a, const Packet2i& b) { return vorr_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui por<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vorr_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui por<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vorrq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l por<Packet2l>(const Packet2l& a, const Packet2l& b)
+{ return vorrq_s64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ul por<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{ return vorrq_u64(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet2f pxor<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4c pxor<Packet4c>(const Packet4c& a, const Packet4c& b)
+{ return a ^ b; }
+template<> EIGEN_STRONG_INLINE Packet8c pxor<Packet8c>(const Packet8c& a, const Packet8c& b)
+{ return veor_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pxor<Packet16c>(const Packet16c& a, const Packet16c& b)
+{ return veorq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pxor<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{ return a ^ b; }
+template<> EIGEN_STRONG_INLINE Packet8uc pxor<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return veor_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pxor<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return veorq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pxor<Packet4s>(const Packet4s& a, const Packet4s& b) { return veor_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pxor<Packet8s>(const Packet8s& a, const Packet8s& b) { return veorq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pxor<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return veor_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pxor<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return veorq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pxor<Packet2i>(const Packet2i& a, const Packet2i& b) { return veor_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pxor<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return veor_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pxor<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return veorq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pxor<Packet2l>(const Packet2l& a, const Packet2l& b)
+{ return veorq_s64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ul pxor<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{ return veorq_u64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2f pandnot<Packet2f>(const Packet2f& a, const Packet2f& b)
+{ return vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
+template<> EIGEN_STRONG_INLINE Packet4c pandnot<Packet4c>(const Packet4c& a, const Packet4c& b)
+{ return a & ~b; }
+template<> EIGEN_STRONG_INLINE Packet8c pandnot<Packet8c>(const Packet8c& a, const Packet8c& b) { return vbic_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pandnot<Packet16c>(const Packet16c& a, const Packet16c& b) { return vbicq_s8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4uc pandnot<Packet4uc>(const Packet4uc& a, const Packet4uc& b)
+{ return a & ~b; }
+template<> EIGEN_STRONG_INLINE Packet8uc pandnot<Packet8uc>(const Packet8uc& a, const Packet8uc& b)
+{ return vbic_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pandnot<Packet16uc>(const Packet16uc& a, const Packet16uc& b)
+{ return vbicq_u8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4s pandnot<Packet4s>(const Packet4s& a, const Packet4s& b)
+{ return vbic_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8s pandnot<Packet8s>(const Packet8s& a, const Packet8s& b)
+{ return vbicq_s16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4us pandnot<Packet4us>(const Packet4us& a, const Packet4us& b)
+{ return vbic_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pandnot<Packet8us>(const Packet8us& a, const Packet8us& b)
+{ return vbicq_u16(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2i pandnot<Packet2i>(const Packet2i& a, const Packet2i& b)
+{ return vbic_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vbicq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ui pandnot<Packet2ui>(const Packet2ui& a, const Packet2ui& b)
+{ return vbic_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pandnot<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{ return vbicq_u32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2l pandnot<Packet2l>(const Packet2l& a, const Packet2l& b)
+{ return vbicq_s64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2ul pandnot<Packet2ul>(const Packet2ul& a, const Packet2ul& b)
+{ return vbicq_u64(a,b); }
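+// For the scalar 4-byte packets the bitwise kernels need no intrinsics at
+// all: Packet4c/Packet4uc are plain 32-bit integers, so a & b, a | b, a ^ b
+// and a & ~b already operate on all four lanes at once.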
+
+
+template<int N> EIGEN_STRONG_INLINE Packet4c parithmetic_shift_right(Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vshr_n_s8(vreinterpret_s8_s32(vdup_n_s32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8c parithmetic_shift_right(Packet8c a) { return vshr_n_s8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16c parithmetic_shift_right(Packet16c a) { return vshrq_n_s8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4uc parithmetic_shift_right(Packet4uc& a)
+{ return vget_lane_u32(vreinterpret_u32_u8(vshr_n_u8(vreinterpret_u8_u32(vdup_n_u32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8uc parithmetic_shift_right(Packet8uc a) { return vshr_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16uc parithmetic_shift_right(Packet16uc a) { return vshrq_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4s parithmetic_shift_right(Packet4s a) { return vshr_n_s16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8s parithmetic_shift_right(Packet8s a) { return vshrq_n_s16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4us parithmetic_shift_right(Packet4us a) { return vshr_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8us parithmetic_shift_right(Packet8us a) { return vshrq_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2i parithmetic_shift_right(Packet2i a) { return vshr_n_s32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(Packet4i a) { return vshrq_n_s32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2ui parithmetic_shift_right(Packet2ui a) { return vshr_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4ui parithmetic_shift_right(Packet4ui a) { return vshrq_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2l parithmetic_shift_right(Packet2l a) { return vshrq_n_s64(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2ul parithmetic_shift_right(Packet2ul a) { return vshrq_n_u64(a,N); }
+
+template<int N> EIGEN_STRONG_INLINE Packet4c plogical_shift_right(Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_u8(vshr_n_u8(vreinterpret_u8_s32(vdup_n_s32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8c plogical_shift_right(Packet8c a)
+{ return vreinterpret_s8_u8(vshr_n_u8(vreinterpret_u8_s8(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet16c plogical_shift_right(Packet16c a)
+{ return vreinterpretq_s8_u8(vshrq_n_u8(vreinterpretq_u8_s8(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet4uc plogical_shift_right(Packet4uc& a)
+{ return vget_lane_u32(vreinterpret_u32_u8(vshr_n_u8(vreinterpret_u8_u32(vdup_n_u32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8uc plogical_shift_right(Packet8uc a) { return vshr_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16uc plogical_shift_right(Packet16uc a) { return vshrq_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4s plogical_shift_right(Packet4s a)
+{ return vreinterpret_s16_u16(vshr_n_u16(vreinterpret_u16_s16(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet8s plogical_shift_right(Packet8s a)
+{ return vreinterpretq_s16_u16(vshrq_n_u16(vreinterpretq_u16_s16(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet4us plogical_shift_right(Packet4us a) { return vshr_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8us plogical_shift_right(Packet8us a) { return vshrq_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2i plogical_shift_right(Packet2i a)
+{ return vreinterpret_s32_u32(vshr_n_u32(vreinterpret_u32_s32(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right(Packet4i a)
+{ return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet2ui plogical_shift_right(Packet2ui a) { return vshr_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_right(Packet4ui a) { return vshrq_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2l plogical_shift_right(Packet2l a)
+{ return vreinterpretq_s64_u64(vshrq_n_u64(vreinterpretq_u64_s64(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet2ul plogical_shift_right(Packet2ul a) { return vshrq_n_u64(a,N); }
+
+template<int N> EIGEN_STRONG_INLINE Packet4c plogical_shift_left(Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vshl_n_s8(vreinterpret_s8_s32(vdup_n_s32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8c plogical_shift_left(Packet8c a) { return vshl_n_s8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16c plogical_shift_left(Packet16c a) { return vshlq_n_s8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4uc plogical_shift_left(Packet4uc& a)
+{ return vget_lane_u32(vreinterpret_u32_u8(vshl_n_u8(vreinterpret_u8_u32(vdup_n_u32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8uc plogical_shift_left(Packet8uc a) { return vshl_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16uc plogical_shift_left(Packet16uc a) { return vshlq_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4s plogical_shift_left(Packet4s a) { return vshl_n_s16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8s plogical_shift_left(Packet8s a) { return vshlq_n_s16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4us plogical_shift_left(Packet4us a) { return vshl_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8us plogical_shift_left(Packet8us a) { return vshlq_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2i plogical_shift_left(Packet2i a) { return vshl_n_s32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left(Packet4i a) { return vshlq_n_s32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2ui plogical_shift_left(Packet2ui a) { return vshl_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_left(Packet4ui a) { return vshlq_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2l plogical_shift_left(Packet2l a) { return vshlq_n_s64(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2ul plogical_shift_left(Packet2ul a) { return vshlq_n_u64(a,N); }
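+// The shift amount N is a template parameter because vshr_n_*/vshl_n_*
+// require an immediate operand.  Arithmetic right shifts replicate the sign
+// bit while logical ones shift in zeros, which is why plogical_shift_right
+// on signed packets round-trips through the unsigned reinterpretation.
+// Scalar sketch of one signed lane (illustration only):
+//   int8_t r = int8_t(uint8_t(a) >> N);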
+
+template<> EIGEN_STRONG_INLINE Packet2f pload<Packet2f>(const float* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4c pload<Packet4c>(const int8_t* from)
+{
+ Packet4c res;
+ memcpy(&res, from, sizeof(Packet4c));
+ return res;
+}
+template<> EIGEN_STRONG_INLINE Packet8c pload<Packet8c>(const int8_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_s8(from); }
+template<> EIGEN_STRONG_INLINE Packet16c pload<Packet16c>(const int8_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s8(from); }
+template<> EIGEN_STRONG_INLINE Packet4uc pload<Packet4uc>(const uint8_t* from)
{
- return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+ Packet4uc res;
+ memcpy(&res, from, sizeof(Packet4uc));
+ return res;
}
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8uc pload<Packet8uc>(const uint8_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_u8(from); }
+template<> EIGEN_STRONG_INLINE Packet16uc pload<Packet16uc>(const uint8_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u8(from); }
+template<> EIGEN_STRONG_INLINE Packet4s pload<Packet4s>(const int16_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_s16(from); }
+template<> EIGEN_STRONG_INLINE Packet8s pload<Packet8s>(const int16_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s16(from); }
+template<> EIGEN_STRONG_INLINE Packet4us pload<Packet4us>(const uint16_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_u16(from); }
+template<> EIGEN_STRONG_INLINE Packet8us pload<Packet8us>(const uint16_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u16(from); }
+template<> EIGEN_STRONG_INLINE Packet2i pload<Packet2i>(const int32_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet2ui pload<Packet2ui>(const uint32_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet4ui pload<Packet4ui>(const uint32_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet2l pload<Packet2l>(const int64_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s64(from); }
+template<> EIGEN_STRONG_INLINE Packet2ul pload<Packet2ul>(const uint64_t* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u64(from); }
-template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
-
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet2f ploadu<Packet2f>(const float* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4c ploadu<Packet4c>(const int8_t* from)
+{
+ Packet4c res;
+ memcpy(&res, from, sizeof(Packet4c));
+ return res;
+}
+template<> EIGEN_STRONG_INLINE Packet8c ploadu<Packet8c>(const int8_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s8(from); }
+template<> EIGEN_STRONG_INLINE Packet16c ploadu<Packet16c>(const int8_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s8(from); }
+template<> EIGEN_STRONG_INLINE Packet4uc ploadu<Packet4uc>(const uint8_t* from)
+{
+ Packet4uc res;
+ memcpy(&res, from, sizeof(Packet4uc));
+ return res;
+}
+template<> EIGEN_STRONG_INLINE Packet8uc ploadu<Packet8uc>(const uint8_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_u8(from); }
+template<> EIGEN_STRONG_INLINE Packet16uc ploadu<Packet16uc>(const uint8_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u8(from); }
+template<> EIGEN_STRONG_INLINE Packet4s ploadu<Packet4s>(const int16_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s16(from); }
+template<> EIGEN_STRONG_INLINE Packet8s ploadu<Packet8s>(const int16_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s16(from); }
+template<> EIGEN_STRONG_INLINE Packet4us ploadu<Packet4us>(const uint16_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_u16(from); }
+template<> EIGEN_STRONG_INLINE Packet8us ploadu<Packet8us>(const uint16_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u16(from); }
+template<> EIGEN_STRONG_INLINE Packet2i ploadu<Packet2i>(const int32_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet2ui ploadu<Packet2ui>(const uint32_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet4ui ploadu<Packet4ui>(const uint32_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet2l ploadu<Packet2l>(const int64_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s64(from); }
+template<> EIGEN_STRONG_INLINE Packet2ul ploadu<Packet2ul>(const uint64_t* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u64(from); }
+template<> EIGEN_STRONG_INLINE Packet2f ploaddup<Packet2f>(const float* from)
+{ return vld1_dup_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{ return vcombine_f32(vld1_dup_f32(from), vld1_dup_f32(from+1)); }
+template<> EIGEN_STRONG_INLINE Packet4c ploaddup<Packet4c>(const int8_t* from)
+{
+ const int8x8_t a = vreinterpret_s8_s32(vdup_n_s32(pload<Packet4c>(from)));
+ return vget_lane_s32(vreinterpret_s32_s8(vzip_s8(a,a).val[0]), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8c ploaddup<Packet8c>(const int8_t* from)
+{
+ const int8x8_t a = vld1_s8(from);
+ return vzip_s8(a,a).val[0];
+}
+template<> EIGEN_STRONG_INLINE Packet16c ploaddup<Packet16c>(const int8_t* from)
+{
+ const int8x8_t a = vld1_s8(from);
+ const int8x8x2_t b = vzip_s8(a,a);
+ return vcombine_s8(b.val[0], b.val[1]);
+}
+template<> EIGEN_STRONG_INLINE Packet4uc ploaddup<Packet4uc>(const uint8_t* from)
+{
+ const uint8x8_t a = vreinterpret_u8_u32(vdup_n_u32(pload<Packet4uc>(from)));
+ return vget_lane_u32(vreinterpret_u32_u8(vzip_u8(a,a).val[0]), 0);
+}
+template<> EIGEN_STRONG_INLINE Packet8uc ploaddup<Packet8uc>(const uint8_t* from)
{
- float32x2_t lo, hi;
- lo = vld1_dup_f32(from);
- hi = vld1_dup_f32(from+1);
- return vcombine_f32(lo, hi);
+ const uint8x8_t a = vld1_u8(from);
+ return vzip_u8(a,a).val[0];
}
+template<> EIGEN_STRONG_INLINE Packet16uc ploaddup<Packet16uc>(const uint8_t* from)
+{
+ const uint8x8_t a = vld1_u8(from);
+ const uint8x8x2_t b = vzip_u8(a,a);
+ return vcombine_u8(b.val[0], b.val[1]);
+}
+template<> EIGEN_STRONG_INLINE Packet4s ploaddup<Packet4s>(const int16_t* from)
+{
+ return vreinterpret_s16_u32(vzip_u32(vreinterpret_u32_s16(vld1_dup_s16(from)),
+ vreinterpret_u32_s16(vld1_dup_s16(from+1))).val[0]);
+}
+template<> EIGEN_STRONG_INLINE Packet8s ploaddup<Packet8s>(const int16_t* from)
+{
+ const int16x4_t a = vld1_s16(from);
+ const int16x4x2_t b = vzip_s16(a,a);
+ return vcombine_s16(b.val[0], b.val[1]);
+}
+template<> EIGEN_STRONG_INLINE Packet4us ploaddup<Packet4us>(const uint16_t* from)
+{
+ return vreinterpret_u16_u32(vzip_u32(vreinterpret_u32_u16(vld1_dup_u16(from)),
+ vreinterpret_u32_u16(vld1_dup_u16(from+1))).val[0]);
+}
+template<> EIGEN_STRONG_INLINE Packet8us ploaddup<Packet8us>(const uint16_t* from)
+{
+ const uint16x4_t a = vld1_u16(from);
+ const uint16x4x2_t b = vzip_u16(a,a);
+ return vcombine_u16(b.val[0], b.val[1]);
+}
+template<> EIGEN_STRONG_INLINE Packet2i ploaddup<Packet2i>(const int32_t* from)
+{ return vld1_dup_s32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
+{ return vcombine_s32(vld1_dup_s32(from), vld1_dup_s32(from+1)); }
+template<> EIGEN_STRONG_INLINE Packet2ui ploaddup<Packet2ui>(const uint32_t* from)
+{ return vld1_dup_u32(from); }
+template<> EIGEN_STRONG_INLINE Packet4ui ploaddup<Packet4ui>(const uint32_t* from)
+{ return vcombine_u32(vld1_dup_u32(from), vld1_dup_u32(from+1)); }
+template<> EIGEN_STRONG_INLINE Packet2l ploaddup<Packet2l>(const int64_t* from)
+{ return vld1q_dup_s64(from); }
+template<> EIGEN_STRONG_INLINE Packet2ul ploaddup<Packet2ul>(const uint64_t* from)
+{ return vld1q_dup_u64(from); }
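+// ploaddup reads PacketSize/2 scalars and duplicates each of them, turning
+// a load of {a, b} into {a, a, b, b}; the vzip-based variants interleave a
+// half vector with itself to achieve the same doubling for 8-bit and 16-bit
+// lanes.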
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadquad<Packet4f>(const float* from) { return vld1q_dup_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4c ploadquad<Packet4c>(const int8_t* from)
+{ return vget_lane_s32(vreinterpret_s32_s8(vld1_dup_s8(from)), 0); }
+template<> EIGEN_STRONG_INLINE Packet8c ploadquad<Packet8c>(const int8_t* from)
{
- int32x2_t lo, hi;
- lo = vld1_dup_s32(from);
- hi = vld1_dup_s32(from+1);
- return vcombine_s32(lo, hi);
+ return vreinterpret_s8_u32(vzip_u32(
+ vreinterpret_u32_s8(vld1_dup_s8(from)),
+ vreinterpret_u32_s8(vld1_dup_s8(from+1))).val[0]);
}
+template<> EIGEN_STRONG_INLINE Packet16c ploadquad<Packet16c>(const int8_t* from)
+{
+ const int8x8_t a = vreinterpret_s8_u32(vzip_u32(
+ vreinterpret_u32_s8(vld1_dup_s8(from)),
+ vreinterpret_u32_s8(vld1_dup_s8(from+1))).val[0]);
+ const int8x8_t b = vreinterpret_s8_u32(vzip_u32(
+ vreinterpret_u32_s8(vld1_dup_s8(from+2)),
+ vreinterpret_u32_s8(vld1_dup_s8(from+3))).val[0]);
+ return vcombine_s8(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet4uc ploadquad<Packet4uc>(const uint8_t* from)
+{ return vget_lane_u32(vreinterpret_u32_u8(vld1_dup_u8(from)), 0); }
+template<> EIGEN_STRONG_INLINE Packet8uc ploadquad<Packet8uc>(const uint8_t* from)
+{
+ return vreinterpret_u8_u32(vzip_u32(
+ vreinterpret_u32_u8(vld1_dup_u8(from)),
+ vreinterpret_u32_u8(vld1_dup_u8(from+1))).val[0]);
+}
+template<> EIGEN_STRONG_INLINE Packet16uc ploadquad<Packet16uc>(const uint8_t* from)
+{
+ const uint8x8_t a = vreinterpret_u8_u32(vzip_u32(
+ vreinterpret_u32_u8(vld1_dup_u8(from)),
+ vreinterpret_u32_u8(vld1_dup_u8(from+1))).val[0]);
+ const uint8x8_t b = vreinterpret_u8_u32(vzip_u32(
+ vreinterpret_u32_u8(vld1_dup_u8(from+2)),
+ vreinterpret_u32_u8(vld1_dup_u8(from+3))).val[0]);
+ return vcombine_u8(a,b);
+}
+template<> EIGEN_STRONG_INLINE Packet8s ploadquad<Packet8s>(const int16_t* from)
+{ return vcombine_s16(vld1_dup_s16(from), vld1_dup_s16(from+1)); }
+template<> EIGEN_STRONG_INLINE Packet8us ploadquad<Packet8us>(const uint16_t* from)
+{ return vcombine_u16(vld1_dup_u16(from), vld1_dup_u16(from+1)); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadquad<Packet4i>(const int32_t* from) { return vld1q_dup_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4ui ploadquad<Packet4ui>(const uint32_t* from) { return vld1q_dup_u32(from); }
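+// ploadquad is the x4 analogue of ploaddup: it reads PacketSize/4 scalars
+// and repeats each of them four times, e.g. ploadquad<Packet8s> turns
+// {a, b} into {a, a, a, a, b, b, b, b}.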
-template<> EIGEN_STRONG_INLINE void pstore<float> (float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet2f& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_f32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int8_t>(int8_t* to, const Packet4c& from)
+{ memcpy(to, &from, sizeof(from)); }
+template<> EIGEN_STRONG_INLINE void pstore<int8_t>(int8_t* to, const Packet8c& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_s8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int8_t>(int8_t* to, const Packet16c& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint8_t>(uint8_t* to, const Packet4uc& from)
+{ memcpy(to, &from, sizeof(from)); }
+template<> EIGEN_STRONG_INLINE void pstore<uint8_t>(uint8_t* to, const Packet8uc& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_u8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint8_t>(uint8_t* to, const Packet16uc& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int16_t>(int16_t* to, const Packet4s& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_s16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int16_t>(int16_t* to, const Packet8s& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint16_t>(uint16_t* to, const Packet4us& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_u16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint16_t>(uint16_t* to, const Packet8us& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet2i& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_s32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint32_t>(uint32_t* to, const Packet2ui& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1_u32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint32_t>(uint32_t* to, const Packet4ui& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<int64_t>(int64_t* to, const Packet2l& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s64(to,from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint64_t>(uint64_t* to, const Packet2ul& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u64(to,from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<float> (float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet2f& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_f32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int8_t>(int8_t* to, const Packet4c& from)
+{ memcpy(to, &from, sizeof(from)); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int8_t>(int8_t* to, const Packet8c& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_s8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int8_t>(int8_t* to, const Packet16c& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint8_t>(uint8_t* to, const Packet4uc& from)
+{ memcpy(to, &from, sizeof(from)); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint8_t>(uint8_t* to, const Packet8uc& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_u8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint8_t>(uint8_t* to, const Packet16uc& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u8(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int16_t>(int16_t* to, const Packet4s& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_s16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int16_t>(int16_t* to, const Packet8s& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint16_t>(uint16_t* to, const Packet4us& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_u16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint16_t>(uint16_t* to, const Packet8us& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u16(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet2i& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_s32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint32_t>(uint32_t* to, const Packet2ui& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1_u32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint32_t>(uint32_t* to, const Packet4ui& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u32(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int64_t>(int64_t* to, const Packet2l& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s64(to,from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint64_t>(uint64_t* to, const Packet2ul& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u64(to,from); }
-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2f pgather<float, Packet2f>(const float* from, Index stride)
{
- Packet4f res = pset1<Packet4f>(0.f);
- res = vsetq_lane_f32(from[0*stride], res, 0);
- res = vsetq_lane_f32(from[1*stride], res, 1);
- res = vsetq_lane_f32(from[2*stride], res, 2);
- res = vsetq_lane_f32(from[3*stride], res, 3);
+ Packet2f res = vld1_dup_f32(from);
+ res = vld1_lane_f32(from + 1*stride, res, 1);
return res;
}
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
- Packet4i res = pset1<Packet4i>(0);
- res = vsetq_lane_s32(from[0*stride], res, 0);
- res = vsetq_lane_s32(from[1*stride], res, 1);
- res = vsetq_lane_s32(from[2*stride], res, 2);
- res = vsetq_lane_s32(from[3*stride], res, 3);
+ Packet4f res = vld1q_dup_f32(from);
+ res = vld1q_lane_f32(from + 1*stride, res, 1);
+ res = vld1q_lane_f32(from + 2*stride, res, 2);
+ res = vld1q_lane_f32(from + 3*stride, res, 3);
return res;
}
-
-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4c pgather<int8_t, Packet4c>(const int8_t* from, Index stride)
{
- to[stride*0] = vgetq_lane_f32(from, 0);
- to[stride*1] = vgetq_lane_f32(from, 1);
- to[stride*2] = vgetq_lane_f32(from, 2);
- to[stride*3] = vgetq_lane_f32(from, 3);
+ Packet4c res;
+ for (int i = 0; i != 4; i++)
+ reinterpret_cast<int8_t*>(&res)[i] = *(from + i * stride);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8c pgather<int8_t, Packet8c>(const int8_t* from, Index stride)
+{
+ Packet8c res = vld1_dup_s8(from);
+ res = vld1_lane_s8(from + 1*stride, res, 1);
+ res = vld1_lane_s8(from + 2*stride, res, 2);
+ res = vld1_lane_s8(from + 3*stride, res, 3);
+ res = vld1_lane_s8(from + 4*stride, res, 4);
+ res = vld1_lane_s8(from + 5*stride, res, 5);
+ res = vld1_lane_s8(from + 6*stride, res, 6);
+ res = vld1_lane_s8(from + 7*stride, res, 7);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16c pgather<int8_t, Packet16c>(const int8_t* from, Index stride)
+{
+ Packet16c res = vld1q_dup_s8(from);
+ res = vld1q_lane_s8(from + 1*stride, res, 1);
+ res = vld1q_lane_s8(from + 2*stride, res, 2);
+ res = vld1q_lane_s8(from + 3*stride, res, 3);
+ res = vld1q_lane_s8(from + 4*stride, res, 4);
+ res = vld1q_lane_s8(from + 5*stride, res, 5);
+ res = vld1q_lane_s8(from + 6*stride, res, 6);
+ res = vld1q_lane_s8(from + 7*stride, res, 7);
+ res = vld1q_lane_s8(from + 8*stride, res, 8);
+ res = vld1q_lane_s8(from + 9*stride, res, 9);
+ res = vld1q_lane_s8(from + 10*stride, res, 10);
+ res = vld1q_lane_s8(from + 11*stride, res, 11);
+ res = vld1q_lane_s8(from + 12*stride, res, 12);
+ res = vld1q_lane_s8(from + 13*stride, res, 13);
+ res = vld1q_lane_s8(from + 14*stride, res, 14);
+ res = vld1q_lane_s8(from + 15*stride, res, 15);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4uc pgather<uint8_t, Packet4uc>(const uint8_t* from, Index stride)
+{
+ Packet4uc res;
+ for (int i = 0; i != 4; i++)
+ reinterpret_cast<uint8_t*>(&res)[i] = *(from + i * stride);
+ return res;
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8uc pgather<uint8_t, Packet8uc>(const uint8_t* from, Index stride)
{
- to[stride*0] = vgetq_lane_s32(from, 0);
- to[stride*1] = vgetq_lane_s32(from, 1);
- to[stride*2] = vgetq_lane_s32(from, 2);
- to[stride*3] = vgetq_lane_s32(from, 3);
+ Packet8uc res = vld1_dup_u8(from);
+ res = vld1_lane_u8(from + 1*stride, res, 1);
+ res = vld1_lane_u8(from + 2*stride, res, 2);
+ res = vld1_lane_u8(from + 3*stride, res, 3);
+ res = vld1_lane_u8(from + 4*stride, res, 4);
+ res = vld1_lane_u8(from + 5*stride, res, 5);
+ res = vld1_lane_u8(from + 6*stride, res, 6);
+ res = vld1_lane_u8(from + 7*stride, res, 7);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16uc pgather<uint8_t, Packet16uc>(const uint8_t* from, Index stride)
+{
+ Packet16uc res = vld1q_dup_u8(from);
+ res = vld1q_lane_u8(from + 1*stride, res, 1);
+ res = vld1q_lane_u8(from + 2*stride, res, 2);
+ res = vld1q_lane_u8(from + 3*stride, res, 3);
+ res = vld1q_lane_u8(from + 4*stride, res, 4);
+ res = vld1q_lane_u8(from + 5*stride, res, 5);
+ res = vld1q_lane_u8(from + 6*stride, res, 6);
+ res = vld1q_lane_u8(from + 7*stride, res, 7);
+ res = vld1q_lane_u8(from + 8*stride, res, 8);
+ res = vld1q_lane_u8(from + 9*stride, res, 9);
+ res = vld1q_lane_u8(from + 10*stride, res, 10);
+ res = vld1q_lane_u8(from + 11*stride, res, 11);
+ res = vld1q_lane_u8(from + 12*stride, res, 12);
+ res = vld1q_lane_u8(from + 13*stride, res, 13);
+ res = vld1q_lane_u8(from + 14*stride, res, 14);
+ res = vld1q_lane_u8(from + 15*stride, res, 15);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4s pgather<int16_t, Packet4s>(const int16_t* from, Index stride)
+{
+ Packet4s res = vld1_dup_s16(from);
+ res = vld1_lane_s16(from + 1*stride, res, 1);
+ res = vld1_lane_s16(from + 2*stride, res, 2);
+ res = vld1_lane_s16(from + 3*stride, res, 3);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8s pgather<int16_t, Packet8s>(const int16_t* from, Index stride)
+{
+ Packet8s res = vld1q_dup_s16(from);
+ res = vld1q_lane_s16(from + 1*stride, res, 1);
+ res = vld1q_lane_s16(from + 2*stride, res, 2);
+ res = vld1q_lane_s16(from + 3*stride, res, 3);
+ res = vld1q_lane_s16(from + 4*stride, res, 4);
+ res = vld1q_lane_s16(from + 5*stride, res, 5);
+ res = vld1q_lane_s16(from + 6*stride, res, 6);
+ res = vld1q_lane_s16(from + 7*stride, res, 7);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4us pgather<uint16_t, Packet4us>(const uint16_t* from, Index stride)
+{
+ Packet4us res = vld1_dup_u16(from);
+ res = vld1_lane_u16(from + 1*stride, res, 1);
+ res = vld1_lane_u16(from + 2*stride, res, 2);
+ res = vld1_lane_u16(from + 3*stride, res, 3);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8us pgather<uint16_t, Packet8us>(const uint16_t* from, Index stride)
+{
+ Packet8us res = vld1q_dup_u16(from);
+ res = vld1q_lane_u16(from + 1*stride, res, 1);
+ res = vld1q_lane_u16(from + 2*stride, res, 2);
+ res = vld1q_lane_u16(from + 3*stride, res, 3);
+ res = vld1q_lane_u16(from + 4*stride, res, 4);
+ res = vld1q_lane_u16(from + 5*stride, res, 5);
+ res = vld1q_lane_u16(from + 6*stride, res, 6);
+ res = vld1q_lane_u16(from + 7*stride, res, 7);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2i pgather<int32_t, Packet2i>(const int32_t* from, Index stride)
+{
+ Packet2i res = vld1_dup_s32(from);
+ res = vld1_lane_s32(from + 1*stride, res, 1);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
+{
+ Packet4i res = vld1q_dup_s32(from);
+ res = vld1q_lane_s32(from + 1*stride, res, 1);
+ res = vld1q_lane_s32(from + 2*stride, res, 2);
+ res = vld1q_lane_s32(from + 3*stride, res, 3);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ui pgather<uint32_t, Packet2ui>(const uint32_t* from, Index stride)
+{
+ Packet2ui res = vld1_dup_u32(from);
+ res = vld1_lane_u32(from + 1*stride, res, 1);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4ui pgather<uint32_t, Packet4ui>(const uint32_t* from, Index stride)
+{
+ Packet4ui res = vld1q_dup_u32(from);
+ res = vld1q_lane_u32(from + 1*stride, res, 1);
+ res = vld1q_lane_u32(from + 2*stride, res, 2);
+ res = vld1q_lane_u32(from + 3*stride, res, 3);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2l pgather<int64_t, Packet2l>(const int64_t* from, Index stride)
+{
+ Packet2l res = vld1q_dup_s64(from);
+ res = vld1q_lane_s64(from + 1*stride, res, 1);
+ return res;
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ul pgather<uint64_t, Packet2ul>(const uint64_t* from, Index stride)
+{
+ Packet2ul res = vld1q_dup_u64(from);
+ res = vld1q_lane_u64(from + 1*stride, res, 1);
+ return res;
}
-template<> EIGEN_STRONG_INLINE void prefetch<float> (const float* addr) { EIGEN_ARM_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<float, Packet2f>(float* to, const Packet2f& from, Index stride)
+{
+ vst1_lane_f32(to + stride*0, from, 0);
+ vst1_lane_f32(to + stride*1, from, 1);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+ vst1q_lane_f32(to + stride*0, from, 0);
+ vst1q_lane_f32(to + stride*1, from, 1);
+ vst1q_lane_f32(to + stride*2, from, 2);
+ vst1q_lane_f32(to + stride*3, from, 3);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int8_t, Packet4c>(int8_t* to, const Packet4c& from, Index stride)
+{
+ for (int i = 0; i != 4; i++)
+ *(to + i * stride) = reinterpret_cast<const int8_t*>(&from)[i];
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int8_t, Packet8c>(int8_t* to, const Packet8c& from, Index stride)
+{
+ vst1_lane_s8(to + stride*0, from, 0);
+ vst1_lane_s8(to + stride*1, from, 1);
+ vst1_lane_s8(to + stride*2, from, 2);
+ vst1_lane_s8(to + stride*3, from, 3);
+ vst1_lane_s8(to + stride*4, from, 4);
+ vst1_lane_s8(to + stride*5, from, 5);
+ vst1_lane_s8(to + stride*6, from, 6);
+ vst1_lane_s8(to + stride*7, from, 7);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int8_t, Packet16c>(int8_t* to, const Packet16c& from, Index stride)
+{
+ vst1q_lane_s8(to + stride*0, from, 0);
+ vst1q_lane_s8(to + stride*1, from, 1);
+ vst1q_lane_s8(to + stride*2, from, 2);
+ vst1q_lane_s8(to + stride*3, from, 3);
+ vst1q_lane_s8(to + stride*4, from, 4);
+ vst1q_lane_s8(to + stride*5, from, 5);
+ vst1q_lane_s8(to + stride*6, from, 6);
+ vst1q_lane_s8(to + stride*7, from, 7);
+ vst1q_lane_s8(to + stride*8, from, 8);
+ vst1q_lane_s8(to + stride*9, from, 9);
+ vst1q_lane_s8(to + stride*10, from, 10);
+ vst1q_lane_s8(to + stride*11, from, 11);
+ vst1q_lane_s8(to + stride*12, from, 12);
+ vst1q_lane_s8(to + stride*13, from, 13);
+ vst1q_lane_s8(to + stride*14, from, 14);
+ vst1q_lane_s8(to + stride*15, from, 15);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint8_t, Packet4uc>(uint8_t* to, const Packet4uc& from, Index stride)
+{
+ for (int i = 0; i != 4; i++)
+ *(to + i * stride) = reinterpret_cast<const uint8_t*>(&from)[i];
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint8_t, Packet8uc>(uint8_t* to, const Packet8uc& from, Index stride)
+{
+ vst1_lane_u8(to + stride*0, from, 0);
+ vst1_lane_u8(to + stride*1, from, 1);
+ vst1_lane_u8(to + stride*2, from, 2);
+ vst1_lane_u8(to + stride*3, from, 3);
+ vst1_lane_u8(to + stride*4, from, 4);
+ vst1_lane_u8(to + stride*5, from, 5);
+ vst1_lane_u8(to + stride*6, from, 6);
+ vst1_lane_u8(to + stride*7, from, 7);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint8_t, Packet16uc>(uint8_t* to, const Packet16uc& from, Index stride)
+{
+ vst1q_lane_u8(to + stride*0, from, 0);
+ vst1q_lane_u8(to + stride*1, from, 1);
+ vst1q_lane_u8(to + stride*2, from, 2);
+ vst1q_lane_u8(to + stride*3, from, 3);
+ vst1q_lane_u8(to + stride*4, from, 4);
+ vst1q_lane_u8(to + stride*5, from, 5);
+ vst1q_lane_u8(to + stride*6, from, 6);
+ vst1q_lane_u8(to + stride*7, from, 7);
+ vst1q_lane_u8(to + stride*8, from, 8);
+ vst1q_lane_u8(to + stride*9, from, 9);
+ vst1q_lane_u8(to + stride*10, from, 10);
+ vst1q_lane_u8(to + stride*11, from, 11);
+ vst1q_lane_u8(to + stride*12, from, 12);
+ vst1q_lane_u8(to + stride*13, from, 13);
+ vst1q_lane_u8(to + stride*14, from, 14);
+ vst1q_lane_u8(to + stride*15, from, 15);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int16_t, Packet4s>(int16_t* to, const Packet4s& from, Index stride)
+{
+ vst1_lane_s16(to + stride*0, from, 0);
+ vst1_lane_s16(to + stride*1, from, 1);
+ vst1_lane_s16(to + stride*2, from, 2);
+ vst1_lane_s16(to + stride*3, from, 3);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int16_t, Packet8s>(int16_t* to, const Packet8s& from, Index stride)
+{
+ vst1q_lane_s16(to + stride*0, from, 0);
+ vst1q_lane_s16(to + stride*1, from, 1);
+ vst1q_lane_s16(to + stride*2, from, 2);
+ vst1q_lane_s16(to + stride*3, from, 3);
+ vst1q_lane_s16(to + stride*4, from, 4);
+ vst1q_lane_s16(to + stride*5, from, 5);
+ vst1q_lane_s16(to + stride*6, from, 6);
+ vst1q_lane_s16(to + stride*7, from, 7);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint16_t, Packet4us>(uint16_t* to, const Packet4us& from, Index stride)
+{
+ vst1_lane_u16(to + stride*0, from, 0);
+ vst1_lane_u16(to + stride*1, from, 1);
+ vst1_lane_u16(to + stride*2, from, 2);
+ vst1_lane_u16(to + stride*3, from, 3);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint16_t, Packet8us>(uint16_t* to, const Packet8us& from, Index stride)
+{
+ vst1q_lane_u16(to + stride*0, from, 0);
+ vst1q_lane_u16(to + stride*1, from, 1);
+ vst1q_lane_u16(to + stride*2, from, 2);
+ vst1q_lane_u16(to + stride*3, from, 3);
+ vst1q_lane_u16(to + stride*4, from, 4);
+ vst1q_lane_u16(to + stride*5, from, 5);
+ vst1q_lane_u16(to + stride*6, from, 6);
+ vst1q_lane_u16(to + stride*7, from, 7);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int32_t, Packet2i>(int32_t* to, const Packet2i& from, Index stride)
+{
+ vst1_lane_s32(to + stride*0, from, 0);
+ vst1_lane_s32(to + stride*1, from, 1);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
+{
+ vst1q_lane_s32(to + stride*0, from, 0);
+ vst1q_lane_s32(to + stride*1, from, 1);
+ vst1q_lane_s32(to + stride*2, from, 2);
+ vst1q_lane_s32(to + stride*3, from, 3);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint32_t, Packet2ui>(uint32_t* to, const Packet2ui& from, Index stride)
+{
+ vst1_lane_u32(to + stride*0, from, 0);
+ vst1_lane_u32(to + stride*1, from, 1);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint32_t, Packet4ui>(uint32_t* to, const Packet4ui& from, Index stride)
+{
+ vst1q_lane_u32(to + stride*0, from, 0);
+ vst1q_lane_u32(to + stride*1, from, 1);
+ vst1q_lane_u32(to + stride*2, from, 2);
+ vst1q_lane_u32(to + stride*3, from, 3);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<int64_t, Packet2l>(int64_t* to, const Packet2l& from, Index stride)
+{
+ vst1q_lane_s64(to + stride*0, from, 0);
+ vst1q_lane_s64(to + stride*1, from, 1);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<uint64_t, Packet2ul>(uint64_t* to, const Packet2ul& from, Index stride)
+{
+ vst1q_lane_u64(to + stride*0, from, 0);
+ vst1q_lane_u64(to + stride*1, from, 1);
+}
-// FIXME only store the 2 first elements ?
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int8_t>(const int8_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<uint8_t>(const uint8_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int16_t>(const int16_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<uint16_t>(const uint16_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<uint32_t>(const uint32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int64_t>(const int64_t* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<uint64_t>(const uint64_t* addr) { EIGEN_ARM_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
- float32x2_t a_lo, a_hi;
- Packet4f a_r64;
+template<> EIGEN_STRONG_INLINE float pfirst<Packet2f>(const Packet2f& a) { return vget_lane_f32(a,0); }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return vgetq_lane_f32(a,0); }
+template<> EIGEN_STRONG_INLINE int8_t pfirst<Packet4c>(const Packet4c& a) { return static_cast<int8_t>(a & 0xff); }
+template<> EIGEN_STRONG_INLINE int8_t pfirst<Packet8c>(const Packet8c& a) { return vget_lane_s8(a,0); }
+template<> EIGEN_STRONG_INLINE int8_t pfirst<Packet16c>(const Packet16c& a) { return vgetq_lane_s8(a,0); }
+template<> EIGEN_STRONG_INLINE uint8_t pfirst<Packet4uc>(const Packet4uc& a) { return static_cast<uint8_t>(a & 0xff); }
+template<> EIGEN_STRONG_INLINE uint8_t pfirst<Packet8uc>(const Packet8uc& a) { return vget_lane_u8(a,0); }
+template<> EIGEN_STRONG_INLINE uint8_t pfirst<Packet16uc>(const Packet16uc& a) { return vgetq_lane_u8(a,0); }
+template<> EIGEN_STRONG_INLINE int16_t pfirst<Packet4s>(const Packet4s& a) { return vget_lane_s16(a,0); }
+template<> EIGEN_STRONG_INLINE int16_t pfirst<Packet8s>(const Packet8s& a) { return vgetq_lane_s16(a,0); }
+template<> EIGEN_STRONG_INLINE uint16_t pfirst<Packet4us>(const Packet4us& a) { return vget_lane_u16(a,0); }
+template<> EIGEN_STRONG_INLINE uint16_t pfirst<Packet8us>(const Packet8us& a) { return vgetq_lane_u16(a,0); }
+template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet2i>(const Packet2i& a) { return vget_lane_s32(a,0); }
+template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { return vgetq_lane_s32(a,0); }
+template<> EIGEN_STRONG_INLINE uint32_t pfirst<Packet2ui>(const Packet2ui& a) { return vget_lane_u32(a,0); }
+template<> EIGEN_STRONG_INLINE uint32_t pfirst<Packet4ui>(const Packet4ui& a) { return vgetq_lane_u32(a,0); }
+template<> EIGEN_STRONG_INLINE int64_t pfirst<Packet2l>(const Packet2l& a) { return vgetq_lane_s64(a,0); }
+template<> EIGEN_STRONG_INLINE uint64_t pfirst<Packet2ul>(const Packet2ul& a) { return vgetq_lane_u64(a,0); }
- a_r64 = vrev64q_f32(a);
- a_lo = vget_low_f32(a_r64);
- a_hi = vget_high_f32(a_r64);
- return vcombine_f32(a_hi, a_lo);
+template<> EIGEN_STRONG_INLINE Packet2f preverse(const Packet2f& a) { return vrev64_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{
+ const float32x4_t a_r64 = vrev64q_f32(a);
+ return vcombine_f32(vget_high_f32(a_r64), vget_low_f32(a_r64));
}
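+// For 128-bit packets, vrev64 reverses lanes within each 64-bit half; swapping
+// the halves via vcombine(high, low) then completes the full reversal.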
-template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
- int32x2_t a_lo, a_hi;
- Packet4i a_r64;
-
- a_r64 = vrev64q_s32(a);
- a_lo = vget_low_s32(a_r64);
- a_hi = vget_high_s32(a_r64);
- return vcombine_s32(a_hi, a_lo);
+template<> EIGEN_STRONG_INLINE Packet4c preverse(const Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vrev64_s8(vreinterpret_s8_s32(vdup_n_s32(a)))), 0); }
+template<> EIGEN_STRONG_INLINE Packet8c preverse(const Packet8c& a) { return vrev64_s8(a); }
+template<> EIGEN_STRONG_INLINE Packet16c preverse(const Packet16c& a)
+{
+ const int8x16_t a_r64 = vrev64q_s8(a);
+ return vcombine_s8(vget_high_s8(a_r64), vget_low_s8(a_r64));
}
+template<> EIGEN_STRONG_INLINE Packet4uc preverse(const Packet4uc& a)
+{ return vget_lane_u32(vreinterpret_u32_u8(vrev64_u8(vreinterpret_u8_u32(vdup_n_u32(a)))), 0); }
+template<> EIGEN_STRONG_INLINE Packet8uc preverse(const Packet8uc& a) { return vrev64_u8(a); }
+template<> EIGEN_STRONG_INLINE Packet16uc preverse(const Packet16uc& a)
+{
+ const uint8x16_t a_r64 = vrev64q_u8(a);
+ return vcombine_u8(vget_high_u8(a_r64), vget_low_u8(a_r64));
+}
+template<> EIGEN_STRONG_INLINE Packet4s preverse(const Packet4s& a) { return vrev64_s16(a); }
+template<> EIGEN_STRONG_INLINE Packet8s preverse(const Packet8s& a)
+{
+ const int16x8_t a_r64 = vrev64q_s16(a);
+ return vcombine_s16(vget_high_s16(a_r64), vget_low_s16(a_r64));
+}
+template<> EIGEN_STRONG_INLINE Packet4us preverse(const Packet4us& a) { return vrev64_u16(a); }
+template<> EIGEN_STRONG_INLINE Packet8us preverse(const Packet8us& a)
+{
+ const uint16x8_t a_r64 = vrev64q_u16(a);
+ return vcombine_u16(vget_high_u16(a_r64), vget_low_u16(a_r64));
+}
+template<> EIGEN_STRONG_INLINE Packet2i preverse(const Packet2i& a) { return vrev64_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{
+ const int32x4_t a_r64 = vrev64q_s32(a);
+ return vcombine_s32(vget_high_s32(a_r64), vget_low_s32(a_r64));
+}
+template<> EIGEN_STRONG_INLINE Packet2ui preverse(const Packet2ui& a) { return vrev64_u32(a); }
+template<> EIGEN_STRONG_INLINE Packet4ui preverse(const Packet4ui& a)
+{
+ const uint32x4_t a_r64 = vrev64q_u32(a);
+ return vcombine_u32(vget_high_u32(a_r64), vget_low_u32(a_r64));
+}
+template<> EIGEN_STRONG_INLINE Packet2l preverse(const Packet2l& a)
+{ return vcombine_s64(vget_high_s64(a), vget_low_s64(a)); }
+template<> EIGEN_STRONG_INLINE Packet2ul preverse(const Packet2ul& a)
+{ return vcombine_u64(vget_high_u64(a), vget_low_u64(a)); }
+template<> EIGEN_STRONG_INLINE Packet2f pabs(const Packet2f& a) { return vabs_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4c pabs<Packet4c>(const Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vabs_s8(vreinterpret_s8_s32(vdup_n_s32(a)))), 0); }
+template<> EIGEN_STRONG_INLINE Packet8c pabs(const Packet8c& a) { return vabs_s8(a); }
+template<> EIGEN_STRONG_INLINE Packet16c pabs(const Packet16c& a) { return vabsq_s8(a); }
+template<> EIGEN_STRONG_INLINE Packet4uc pabs(const Packet4uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8uc pabs(const Packet8uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet16uc pabs(const Packet16uc& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4s pabs(const Packet4s& a) { return vabs_s16(a); }
+template<> EIGEN_STRONG_INLINE Packet8s pabs(const Packet8s& a) { return vabsq_s16(a); }
+template<> EIGEN_STRONG_INLINE Packet4us pabs(const Packet4us& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet8us pabs(const Packet8us& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2i pabs(const Packet2i& a) { return vabs_s32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet2ui pabs(const Packet2ui& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4ui pabs(const Packet4ui& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2l pabs(const Packet2l& a) {
+#if EIGEN_ARCH_ARM64
+ return vabsq_s64(a);
+#else
+ return vcombine_s64(
+ vdup_n_s64((std::abs)(vgetq_lane_s64(a, 0))),
+ vdup_n_s64((std::abs)(vgetq_lane_s64(a, 1))));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2ul pabs(const Packet2ul& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2f pfrexp<Packet2f>(const Packet2f& a, Packet2f& exponent)
+{ return pfrexp_generic(a,exponent); }
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent)
+{ return pfrexp_generic(a,exponent); }
+
+template<> EIGEN_STRONG_INLINE Packet2f pldexp<Packet2f>(const Packet2f& a, const Packet2f& exponent)
+{ return pldexp_generic(a,exponent); }
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent)
+{ return pldexp_generic(a,exponent); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet2f>(const Packet2f& a) { return vget_lane_f32(vpadd_f32(a,a), 0); }
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
- float32x2_t a_lo, a_hi, sum;
+ const float32x2_t sum = vadd_f32(vget_low_f32(a), vget_high_f32(a));
+ return vget_lane_f32(vpadd_f32(sum, sum), 0);
+}
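+// The integer sums below follow the same shape: add the two halves together,
+// then pairwise-add (vpadd) until a single lane holds the total.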
+template<> EIGEN_STRONG_INLINE int8_t predux<Packet4c>(const Packet4c& a)
+{
+ const int8x8_t a_dup = vreinterpret_s8_s32(vdup_n_s32(a));
+ int8x8_t sum = vpadd_s8(a_dup, a_dup);
+ sum = vpadd_s8(sum, sum);
+ return vget_lane_s8(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux<Packet8c>(const Packet8c& a)
+{
+ int8x8_t sum = vpadd_s8(a,a);
+ sum = vpadd_s8(sum, sum);
+ sum = vpadd_s8(sum, sum);
+ return vget_lane_s8(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux<Packet16c>(const Packet16c& a)
+{
+ int8x8_t sum = vadd_s8(vget_low_s8(a), vget_high_s8(a));
+ sum = vpadd_s8(sum, sum);
+ sum = vpadd_s8(sum, sum);
+ sum = vpadd_s8(sum, sum);
+ return vget_lane_s8(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux<Packet4uc>(const Packet4uc& a)
+{
+ const uint8x8_t a_dup = vreinterpret_u8_u32(vdup_n_u32(a));
+ uint8x8_t sum = vpadd_u8(a_dup, a_dup);
+ sum = vpadd_u8(sum, sum);
+ return vget_lane_u8(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux<Packet8uc>(const Packet8uc& a)
+{
+ uint8x8_t sum = vpadd_u8(a,a);
+ sum = vpadd_u8(sum, sum);
+ sum = vpadd_u8(sum, sum);
+ return vget_lane_u8(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux<Packet16uc>(const Packet16uc& a)
+{
+ uint8x8_t sum = vadd_u8(vget_low_u8(a), vget_high_u8(a));
+ sum = vpadd_u8(sum, sum);
+ sum = vpadd_u8(sum, sum);
+ sum = vpadd_u8(sum, sum);
+ return vget_lane_u8(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux<Packet4s>(const Packet4s& a)
+{
+ const int16x4_t sum = vpadd_s16(a,a);
+ return vget_lane_s16(vpadd_s16(sum, sum), 0);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux<Packet8s>(const Packet8s& a)
+{
+ int16x4_t sum = vadd_s16(vget_low_s16(a), vget_high_s16(a));
+ sum = vpadd_s16(sum, sum);
+ sum = vpadd_s16(sum, sum);
+ return vget_lane_s16(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux<Packet4us>(const Packet4us& a)
+{
+ const uint16x4_t sum = vpadd_u16(a,a);
+ return vget_lane_u16(vpadd_u16(sum, sum), 0);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux<Packet8us>(const Packet8us& a)
+{
+ uint16x4_t sum = vadd_u16(vget_low_u16(a), vget_high_u16(a));
+ sum = vpadd_u16(sum, sum);
+ sum = vpadd_u16(sum, sum);
+ return vget_lane_u16(sum, 0);
+}
+template<> EIGEN_STRONG_INLINE int32_t predux<Packet2i>(const Packet2i& a) { return vget_lane_s32(vpadd_s32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
+{
+ const int32x2_t sum = vadd_s32(vget_low_s32(a), vget_high_s32(a));
+ return vget_lane_s32(vpadd_s32(sum, sum), 0);
+}
+template<> EIGEN_STRONG_INLINE uint32_t predux<Packet2ui>(const Packet2ui& a) { return vget_lane_u32(vpadd_u32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE uint32_t predux<Packet4ui>(const Packet4ui& a)
+{
+ const uint32x2_t sum = vadd_u32(vget_low_u32(a), vget_high_u32(a));
+ return vget_lane_u32(vpadd_u32(sum, sum), 0);
+}
+template<> EIGEN_STRONG_INLINE int64_t predux<Packet2l>(const Packet2l& a)
+{ return vgetq_lane_s64(a, 0) + vgetq_lane_s64(a, 1); }
+template<> EIGEN_STRONG_INLINE uint64_t predux<Packet2ul>(const Packet2ul& a)
+{ return vgetq_lane_u64(a, 0) + vgetq_lane_u64(a, 1); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4c predux_half_dowto4(const Packet8c& a)
+{
+ return vget_lane_s32(vreinterpret_s32_s8(vadd_s8(a,
+ vreinterpret_s8_s32(vrev64_s32(vreinterpret_s32_s8(a))))), 0);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8c predux_half_dowto4(const Packet16c& a)
+{ return vadd_s8(vget_high_s8(a), vget_low_s8(a)); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4uc predux_half_dowto4(const Packet8uc& a)
+{
+ return vget_lane_u32(vreinterpret_u32_u8(vadd_u8(a,
+ vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(a))))), 0);
+}
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8uc predux_half_dowto4(const Packet16uc& a)
+{ return vadd_u8(vget_high_u8(a), vget_low_u8(a)); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4s predux_half_dowto4(const Packet8s& a)
+{ return vadd_s16(vget_high_s16(a), vget_low_s16(a)); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4us predux_half_dowto4(const Packet8us& a)
+{ return vadd_u16(vget_high_u16(a), vget_low_u16(a)); }
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet2f>(const Packet2f& a)
+{ return vget_lane_f32(a, 0) * vget_lane_f32(a, 1); }
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{ return predux_mul(vmul_f32(vget_low_f32(a), vget_high_f32(a))); }
+template<> EIGEN_STRONG_INLINE int8_t predux_mul<Packet4c>(const Packet4c& a)
+{
+ int8x8_t prod = vreinterpret_s8_s32(vdup_n_s32(a));
+ prod = vmul_s8(prod, vrev16_s8(prod));
+ return vget_lane_s8(prod, 0) * vget_lane_s8(prod, 2);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_mul<Packet8c>(const Packet8c& a)
+{
+ int8x8_t prod = vmul_s8(a, vrev16_s8(a));
+ prod = vmul_s8(prod, vrev32_s8(prod));
+ return vget_lane_s8(prod, 0) * vget_lane_s8(prod, 4);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_mul<Packet16c>(const Packet16c& a)
+{ return predux_mul(vmul_s8(vget_low_s8(a), vget_high_s8(a))); }
+template<> EIGEN_STRONG_INLINE uint8_t predux_mul<Packet4uc>(const Packet4uc& a)
+{
+ uint8x8_t prod = vreinterpret_u8_u32(vdup_n_u32(a));
+ prod = vmul_u8(prod, vrev16_u8(prod));
+ return vget_lane_u8(prod, 0) * vget_lane_u8(prod, 2);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_mul<Packet8uc>(const Packet8uc& a)
+{
+ uint8x8_t prod = vmul_u8(a, vrev16_u8(a));
+ prod = vmul_u8(prod, vrev32_u8(prod));
+ return vget_lane_u8(prod, 0) * vget_lane_u8(prod, 4);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_mul<Packet16uc>(const Packet16uc& a)
+{ return predux_mul(vmul_u8(vget_low_u8(a), vget_high_u8(a))); }
+template<> EIGEN_STRONG_INLINE int16_t predux_mul<Packet4s>(const Packet4s& a)
+{
+ const int16x4_t prod = vmul_s16(a, vrev32_s16(a));
+ return vget_lane_s16(prod, 0) * vget_lane_s16(prod, 2);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux_mul<Packet8s>(const Packet8s& a)
+{
+ int16x4_t prod;
+
+ // Get the product of a_lo * a_hi -> |a1*a5|a2*a6|a3*a7|a4*a8|
+ prod = vmul_s16(vget_low_s16(a), vget_high_s16(a));
+ // Swap and multiply |a1*a5*a2*a6|a3*a7*a4*a8|
+ prod = vmul_s16(prod, vrev32_s16(prod));
+ // Multiply |a1*a5*a2*a6*a3*a7*a4*a8|
+ return vget_lane_s16(prod, 0) * vget_lane_s16(prod, 2);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux_mul<Packet4us>(const Packet4us& a)
+{
+ const uint16x4_t prod = vmul_u16(a, vrev32_u16(a));
+ return vget_lane_u16(prod, 0) * vget_lane_u16(prod, 2);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux_mul<Packet8us>(const Packet8us& a)
+{
+ uint16x4_t prod;
+
+ // Get the product of a_lo * a_hi -> |a1*a5|a2*a6|a3*a7|a4*a8|
+ prod = vmul_u16(vget_low_u16(a), vget_high_u16(a));
+ // Swap and multiply |a1*a5*a2*a6|a3*a7*a4*a8|
+ prod = vmul_u16(prod, vrev32_u16(prod));
+ // Multiply |a1*a5*a2*a6*a3*a7*a4*a8|
+ return vget_lane_u16(prod, 0) * vget_lane_u16(prod, 2);
+}
+template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet2i>(const Packet2i& a)
+{ return vget_lane_s32(a, 0) * vget_lane_s32(a, 1); }
+template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
+{ return predux_mul(vmul_s32(vget_low_s32(a), vget_high_s32(a))); }
+template<> EIGEN_STRONG_INLINE uint32_t predux_mul<Packet2ui>(const Packet2ui& a)
+{ return vget_lane_u32(a, 0) * vget_lane_u32(a, 1); }
+template<> EIGEN_STRONG_INLINE uint32_t predux_mul<Packet4ui>(const Packet4ui& a)
+{ return predux_mul(vmul_u32(vget_low_u32(a), vget_high_u32(a))); }
+template<> EIGEN_STRONG_INLINE int64_t predux_mul<Packet2l>(const Packet2l& a)
+{ return vgetq_lane_s64(a, 0) * vgetq_lane_s64(a, 1); }
+template<> EIGEN_STRONG_INLINE uint64_t predux_mul<Packet2ul>(const Packet2ul& a)
+{ return vgetq_lane_u64(a, 0) * vgetq_lane_u64(a, 1); }
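+// The predux_mul overloads above fold the packet in halves: multiply the low
+// half by the high half (or a lane-swapped copy) until one lane remains. A
+// scalar sketch of the same O(log n) scheme, equivalent up to the order of
+// multiplications (illustrative only; the name is hypothetical and <cstdint>
+// and <cstring> are assumed):
+inline int16_t predux_mul_sketch(const int16_t v[8]) {
+ int16_t t[8];
+ std::memcpy(t, v, sizeof t);
+ for (int half = 4; half >= 1; half /= 2)  // 8 -> 4 -> 2 -> 1 lanes
+   for (int i = 0; i < half; ++i)
+     t[i] = static_cast<int16_t>(t[i] * t[i + half]);
+ return t[0];
+}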
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet2f>(const Packet2f& a)
+{ return vget_lane_f32(vpmin_f32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ const float32x2_t min = vmin_f32(vget_low_f32(a), vget_high_f32(a));
+ return vget_lane_f32(vpmin_f32(min, min), 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_min<Packet4c>(const Packet4c& a)
+{
+ const int8x8_t a_dup = vreinterpret_s8_s32(vdup_n_s32(a));
+ int8x8_t min = vpmin_s8(a_dup, a_dup);
+ min = vpmin_s8(min, min);
+ return vget_lane_s8(min, 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_min<Packet8c>(const Packet8c& a)
+{
+ int8x8_t min = vpmin_s8(a,a);
+ min = vpmin_s8(min, min);
+ min = vpmin_s8(min, min);
+ return vget_lane_s8(min, 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_min<Packet16c>(const Packet16c& a)
+{
+ int8x8_t min = vmin_s8(vget_low_s8(a), vget_high_s8(a));
+ min = vpmin_s8(min, min);
+ min = vpmin_s8(min, min);
+ min = vpmin_s8(min, min);
+ return vget_lane_s8(min, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_min<Packet4uc>(const Packet4uc& a)
+{
+ const uint8x8_t a_dup = vreinterpret_u8_u32(vdup_n_u32(a));
+ uint8x8_t min = vpmin_u8(a_dup, a_dup);
+ min = vpmin_u8(min, min);
+ return vget_lane_u8(min, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_min<Packet8uc>(const Packet8uc& a)
+{
+ uint8x8_t min = vpmin_u8(a,a);
+ min = vpmin_u8(min, min);
+ min = vpmin_u8(min, min);
+ return vget_lane_u8(min, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_min<Packet16uc>(const Packet16uc& a)
+{
+ uint8x8_t min = vmin_u8(vget_low_u8(a), vget_high_u8(a));
+ min = vpmin_u8(min, min);
+ min = vpmin_u8(min, min);
+ min = vpmin_u8(min, min);
+ return vget_lane_u8(min, 0);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux_min<Packet4s>(const Packet4s& a)
+{
+ const int16x4_t min = vpmin_s16(a,a);
+ return vget_lane_s16(vpmin_s16(min, min), 0);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux_min<Packet8s>(const Packet8s& a)
+{
+ int16x4_t min = vmin_s16(vget_low_s16(a), vget_high_s16(a));
+ min = vpmin_s16(min, min);
+ min = vpmin_s16(min, min);
+ return vget_lane_s16(min, 0);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux_min<Packet4us>(const Packet4us& a)
+{
+ const uint16x4_t min = vpmin_u16(a,a);
+ return vget_lane_u16(vpmin_u16(min, min), 0);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux_min<Packet8us>(const Packet8us& a)
+{
+ uint16x4_t min = vmin_u16(vget_low_u16(a), vget_high_u16(a));
+ min = vpmin_u16(min, min);
+ min = vpmin_u16(min, min);
+ return vget_lane_u16(min, 0);
+}
+template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet2i>(const Packet2i& a)
+{ return vget_lane_s32(vpmin_s32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
+{
+ const int32x2_t min = vmin_s32(vget_low_s32(a), vget_high_s32(a));
+ return vget_lane_s32(vpmin_s32(min, min), 0);
+}
+template<> EIGEN_STRONG_INLINE uint32_t predux_min<Packet2ui>(const Packet2ui& a)
+{ return vget_lane_u32(vpmin_u32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE uint32_t predux_min<Packet4ui>(const Packet4ui& a)
+{
+ const uint32x2_t min = vmin_u32(vget_low_u32(a), vget_high_u32(a));
+ return vget_lane_u32(vpmin_u32(min, min), 0);
+}
+template<> EIGEN_STRONG_INLINE int64_t predux_min<Packet2l>(const Packet2l& a)
+{ return (std::min)(vgetq_lane_s64(a, 0), vgetq_lane_s64(a, 1)); }
+template<> EIGEN_STRONG_INLINE uint64_t predux_min<Packet2ul>(const Packet2ul& a)
+{ return (std::min)(vgetq_lane_u64(a, 0), vgetq_lane_u64(a, 1)); }
- a_lo = vget_low_f32(a);
- a_hi = vget_high_f32(a);
- sum = vpadd_f32(a_lo, a_hi);
- sum = vpadd_f32(sum, sum);
- return vget_lane_f32(sum, 0);
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet2f>(const Packet2f& a)
+{ return vget_lane_f32(vpmax_f32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ const float32x2_t max = vmax_f32(vget_low_f32(a), vget_high_f32(a));
+ return vget_lane_f32(vpmax_f32(max, max), 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_max<Packet4c>(const Packet4c& a)
+{
+ const int8x8_t a_dup = vreinterpret_s8_s32(vdup_n_s32(a));
+ int8x8_t max = vpmax_s8(a_dup, a_dup);
+ max = vpmax_s8(max, max);
+ return vget_lane_s8(max, 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_max<Packet8c>(const Packet8c& a)
+{
+ int8x8_t max = vpmax_s8(a,a);
+ max = vpmax_s8(max, max);
+ max = vpmax_s8(max, max);
+ return vget_lane_s8(max, 0);
+}
+template<> EIGEN_STRONG_INLINE int8_t predux_max<Packet16c>(const Packet16c& a)
+{
+ int8x8_t max = vmax_s8(vget_low_s8(a), vget_high_s8(a));
+ max = vpmax_s8(max, max);
+ max = vpmax_s8(max, max);
+ max = vpmax_s8(max, max);
+ return vget_lane_s8(max, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_max<Packet4uc>(const Packet4uc& a)
+{
+ const uint8x8_t a_dup = vreinterpret_u8_u32(vdup_n_u32(a));
+ uint8x8_t max = vpmax_u8(a_dup, a_dup);
+ max = vpmax_u8(max, max);
+ return vget_lane_u8(max, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_max<Packet8uc>(const Packet8uc& a)
+{
+ uint8x8_t max = vpmax_u8(a,a);
+ max = vpmax_u8(max, max);
+ max = vpmax_u8(max, max);
+ return vget_lane_u8(max, 0);
+}
+template<> EIGEN_STRONG_INLINE uint8_t predux_max<Packet16uc>(const Packet16uc& a)
+{
+ uint8x8_t max = vmax_u8(vget_low_u8(a), vget_high_u8(a));
+ max = vpmax_u8(max, max);
+ max = vpmax_u8(max, max);
+ max = vpmax_u8(max, max);
+ return vget_lane_u8(max, 0);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux_max<Packet4s>(const Packet4s& a)
+{
+ const int16x4_t max = vpmax_s16(a,a);
+ return vget_lane_s16(vpmax_s16(max, max), 0);
+}
+template<> EIGEN_STRONG_INLINE int16_t predux_max<Packet8s>(const Packet8s& a)
+{
+ int16x4_t max = vmax_s16(vget_low_s16(a), vget_high_s16(a));
+ max = vpmax_s16(max, max);
+ max = vpmax_s16(max, max);
+ return vget_lane_s16(max, 0);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux_max<Packet4us>(const Packet4us& a)
+{
+ const uint16x4_t max = vpmax_u16(a,a);
+ return vget_lane_u16(vpmax_u16(max, max), 0);
+}
+template<> EIGEN_STRONG_INLINE uint16_t predux_max<Packet8us>(const Packet8us& a)
+{
+ uint16x4_t max = vmax_u16(vget_low_u16(a), vget_high_u16(a));
+ max = vpmax_u16(max, max);
+ max = vpmax_u16(max, max);
+ return vget_lane_u16(max, 0);
+}
+template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet2i>(const Packet2i& a)
+{ return vget_lane_s32(vpmax_s32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
+{
+ const int32x2_t max = vmax_s32(vget_low_s32(a), vget_high_s32(a));
+ return vget_lane_s32(vpmax_s32(max, max), 0);
+}
+template<> EIGEN_STRONG_INLINE uint32_t predux_max<Packet2ui>(const Packet2ui& a)
+{ return vget_lane_u32(vpmax_u32(a,a), 0); }
+template<> EIGEN_STRONG_INLINE uint32_t predux_max<Packet4ui>(const Packet4ui& a)
+{
+ const uint32x2_t max = vmax_u32(vget_low_u32(a), vget_high_u32(a));
+ return vget_lane_u32(vpmax_u32(max, max), 0);
}
+template<> EIGEN_STRONG_INLINE int64_t predux_max<Packet2l>(const Packet2l& a)
+{ return (std::max)(vgetq_lane_s64(a, 0), vgetq_lane_s64(a, 1)); }
+template<> EIGEN_STRONG_INLINE uint64_t predux_max<Packet2ul>(const Packet2ul& a)
+{ return (std::max)(vgetq_lane_u64(a, 0), vgetq_lane_u64(a, 1)); }
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
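+// predux_any: true iff any lane of x is non-zero. OR the two halves together,
+// then take a pairwise max so that any set bit reaches lane 0.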
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
{
- float32x4x2_t vtrn1, vtrn2, res1, res2;
- Packet4f sum1, sum2, sum;
+ uint32x2_t tmp = vorr_u32(vget_low_u32( vreinterpretq_u32_f32(x)),
+ vget_high_u32(vreinterpretq_u32_f32(x)));
+ return vget_lane_u32(vpmax_u32(tmp, tmp), 0);
+}
- // NEON zip performs interleaving of the supplied vectors.
- // We perform two interleaves in a row to acquire the transposed vector
- vtrn1 = vzipq_f32(vecs[0], vecs[2]);
- vtrn2 = vzipq_f32(vecs[1], vecs[3]);
- res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
- res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
+// Helpers for ptranspose.
+namespace detail {
+
+template<typename Packet>
+void zip_in_place(Packet& p1, Packet& p2);
- // Do the addition of the resulting vectors
- sum1 = vaddq_f32(res1.val[0], res1.val[1]);
- sum2 = vaddq_f32(res2.val[0], res2.val[1]);
- sum = vaddq_f32(sum1, sum2);
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet2f>(Packet2f& p1, Packet2f& p2) {
+ const float32x2x2_t tmp = vzip_f32(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
- return sum;
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet4f>(Packet4f& p1, Packet4f& p2) {
+ const float32x4x2_t tmp = vzipq_f32(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
}
-template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet8c>(Packet8c& p1, Packet8c& p2) {
+ const int8x8x2_t tmp = vzip_s8(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet16c>(Packet16c& p1, Packet16c& p2) {
+ const int8x16x2_t tmp = vzipq_s8(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet8uc>(Packet8uc& p1, Packet8uc& p2) {
+ const uint8x8x2_t tmp = vzip_u8(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet16uc>(Packet16uc& p1, Packet16uc& p2) {
+ const uint8x16x2_t tmp = vzipq_u8(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet2i>(Packet2i& p1, Packet2i& p2) {
+ const int32x2x2_t tmp = vzip_s32(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet4i>(Packet4i& p1, Packet4i& p2) {
+ const int32x4x2_t tmp = vzipq_s32(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet2ui>(Packet2ui& p1, Packet2ui& p2) {
+ const uint32x2x2_t tmp = vzip_u32(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet4ui>(Packet4ui& p1, Packet4ui& p2) {
+ const uint32x4x2_t tmp = vzipq_u32(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet4s>(Packet4s& p1, Packet4s& p2) {
+ const int16x4x2_t tmp = vzip_s16(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet8s>(Packet8s& p1, Packet8s& p2) {
+ const int16x8x2_t tmp = vzipq_s16(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet4us>(Packet4us& p1, Packet4us& p2) {
+ const uint16x4x2_t tmp = vzip_u16(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet8us>(Packet8us& p1, Packet8us& p2) {
+ const uint16x8x2_t tmp = vzipq_u16(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
+}
+
+template<typename Packet>
+EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock<Packet, 2>& kernel) {
+ zip_in_place(kernel.packet[0], kernel.packet[1]);
+}
+
+template<typename Packet>
+EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock<Packet, 4>& kernel) {
+ zip_in_place(kernel.packet[0], kernel.packet[2]);
+ zip_in_place(kernel.packet[1], kernel.packet[3]);
+ zip_in_place(kernel.packet[0], kernel.packet[1]);
+ zip_in_place(kernel.packet[2], kernel.packet[3]);
+}
+
+template<typename Packet>
+EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock<Packet, 8>& kernel) {
+ zip_in_place(kernel.packet[0], kernel.packet[4]);
+ zip_in_place(kernel.packet[1], kernel.packet[5]);
+ zip_in_place(kernel.packet[2], kernel.packet[6]);
+ zip_in_place(kernel.packet[3], kernel.packet[7]);
+
+ zip_in_place(kernel.packet[0], kernel.packet[2]);
+ zip_in_place(kernel.packet[1], kernel.packet[3]);
+ zip_in_place(kernel.packet[4], kernel.packet[6]);
+ zip_in_place(kernel.packet[5], kernel.packet[7]);
+
+ zip_in_place(kernel.packet[0], kernel.packet[1]);
+ zip_in_place(kernel.packet[2], kernel.packet[3]);
+ zip_in_place(kernel.packet[4], kernel.packet[5]);
+ zip_in_place(kernel.packet[6], kernel.packet[7]);
+}
+
+template<typename Packet>
+EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock<Packet, 16>& kernel) {
+ EIGEN_UNROLL_LOOP
+ for (int i=0; i<4; ++i) {
+ // Stage i zips rows that are n = 2^(3-i) apart, within 2^i disjoint blocks.
+ const int m = (1 << i);
+ EIGEN_UNROLL_LOOP
+ for (int j=0; j<m; ++j) {
+ const int n = (1 << (3-i));
+ EIGEN_UNROLL_LOOP
+ for (int k=0; k<n; ++k) {
+ const int idx = 2*j*n+k;
+ zip_in_place(kernel.packet[idx], kernel.packet[idx + n]);
+ }
+ }
+ }
+}
+
+} // namespace detail
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2f, 2>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4f, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4c, 4>& kernel)
{
- int32x2_t a_lo, a_hi, sum;
+ const int8x8_t a = vreinterpret_s8_s32(vset_lane_s32(kernel.packet[2], vdup_n_s32(kernel.packet[0]), 1));
+ const int8x8_t b = vreinterpret_s8_s32(vset_lane_s32(kernel.packet[3], vdup_n_s32(kernel.packet[1]), 1));
- a_lo = vget_low_s32(a);
- a_hi = vget_high_s32(a);
- sum = vpadd_s32(a_lo, a_hi);
- sum = vpadd_s32(sum, sum);
- return vget_lane_s32(sum, 0);
+ const int8x8x2_t zip8 = vzip_s8(a,b);
+ const int16x4x2_t zip16 = vzip_s16(vreinterpret_s16_s8(zip8.val[0]), vreinterpret_s16_s8(zip8.val[1]));
+
+ kernel.packet[0] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[0]), 0);
+ kernel.packet[1] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[0]), 1);
+ kernel.packet[2] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[1]), 0);
+ kernel.packet[3] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[1]), 1);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8c, 8>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8c, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16c, 16>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16c, 8>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16c, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
}
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4uc, 4>& kernel)
{
- int32x4x2_t vtrn1, vtrn2, res1, res2;
- Packet4i sum1, sum2, sum;
+ const uint8x8_t a = vreinterpret_u8_u32(vset_lane_u32(kernel.packet[2], vdup_n_u32(kernel.packet[0]), 1));
+ const uint8x8_t b = vreinterpret_u8_u32(vset_lane_u32(kernel.packet[3], vdup_n_u32(kernel.packet[1]), 1));
+
+ const uint8x8x2_t zip8 = vzip_u8(a,b);
+ const uint16x4x2_t zip16 = vzip_u16(vreinterpret_u16_u8(zip8.val[0]), vreinterpret_u16_u8(zip8.val[1]));
+
+ kernel.packet[0] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[0]), 0);
+ kernel.packet[1] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[0]), 1);
+ kernel.packet[2] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[1]), 0);
+ kernel.packet[3] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[1]), 1);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8uc, 8>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8uc, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16uc, 16>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16uc, 8>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16uc, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
- // NEON zip performs interleaving of the supplied vectors.
- // We perform two interleaves in a row to acquire the transposed vector
- vtrn1 = vzipq_s32(vecs[0], vecs[2]);
- vtrn2 = vzipq_s32(vecs[1], vecs[3]);
- res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
- res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4s, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8s, 8>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8s, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
- // Do the addition of the resulting vectors
- sum1 = vaddq_s32(res1.val[0], res1.val[1]);
- sum2 = vaddq_s32(res2.val[0], res2.val[1]);
- sum = vaddq_s32(sum1, sum2);
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4us, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8us, 8>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8us, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
- return sum;
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2i, 2>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4i, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2ui, 2>& kernel) {
+ detail::ptranspose_impl(kernel);
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4ui, 4>& kernel) {
+ detail::ptranspose_impl(kernel);
}
-// Other reduction functions:
-// mul
-template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet2l, 2>& kernel)
{
- float32x2_t a_lo, a_hi, prod;
+#if EIGEN_ARCH_ARM64
+ const int64x2_t tmp1 = vzip1q_s64(kernel.packet[0], kernel.packet[1]);
+ kernel.packet[1] = vzip2q_s64(kernel.packet[0], kernel.packet[1]);
+ kernel.packet[0] = tmp1;
+#else
+ const int64x1_t tmp[2][2] = {
+ { vget_low_s64(kernel.packet[0]), vget_high_s64(kernel.packet[0]) },
+ { vget_low_s64(kernel.packet[1]), vget_high_s64(kernel.packet[1]) }
+ };
- // Get a_lo = |a1|a2| and a_hi = |a3|a4|
- a_lo = vget_low_f32(a);
- a_hi = vget_high_f32(a);
- // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
- prod = vmul_f32(a_lo, a_hi);
- // Multiply prod with its swapped value |a2*a4|a1*a3|
- prod = vmul_f32(prod, vrev64_f32(prod));
+ kernel.packet[0] = vcombine_s64(tmp[0][0], tmp[1][0]);
+ kernel.packet[1] = vcombine_s64(tmp[0][1], tmp[1][1]);
+#endif
+}
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet2ul, 2>& kernel)
+{
+#if EIGEN_ARCH_ARM64
+ const uint64x2_t tmp1 = vzip1q_u64(kernel.packet[0], kernel.packet[1]);
+ kernel.packet[1] = vzip2q_u64(kernel.packet[0], kernel.packet[1]);
+ kernel.packet[0] = tmp1;
+#else
+ const uint64x1_t tmp[2][2] = {
+ { vget_low_u64(kernel.packet[0]), vget_high_u64(kernel.packet[0]) },
+ { vget_low_u64(kernel.packet[1]), vget_high_u64(kernel.packet[1]) }
+ };
- return vget_lane_f32(prod, 0);
+ kernel.packet[0] = vcombine_u64(tmp[0][0], tmp[1][0]);
+ kernel.packet[1] = vcombine_u64(tmp[0][1], tmp[1][1]);
+#endif
}
-template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
+
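+// vbsl picks bits from a where the corresponding mask bit is set and from b
+// elsewhere; signed masks are reinterpreted to the unsigned type vbsl expects.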
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2f pselect(const Packet2f& mask, const Packet2f& a, const Packet2f& b)
+{ return vbsl_f32(vreinterpret_u32_f32(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b)
+{ return vbslq_f32(vreinterpretq_u32_f32(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8c pselect(const Packet8c& mask, const Packet8c& a, const Packet8c& b)
+{ return vbsl_s8(vreinterpret_u8_s8(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16c pselect(const Packet16c& mask, const Packet16c& a, const Packet16c& b)
+{ return vbslq_s8(vreinterpretq_u8_s8(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8uc pselect(const Packet8uc& mask, const Packet8uc& a, const Packet8uc& b)
+{ return vbsl_u8(mask, a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16uc pselect(const Packet16uc& mask, const Packet16uc& a, const Packet16uc& b)
+{ return vbslq_u8(mask, a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4s pselect(const Packet4s& mask, const Packet4s& a, const Packet4s& b)
+{ return vbsl_s16(vreinterpret_u16_s16(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8s pselect(const Packet8s& mask, const Packet8s& a, const Packet8s& b)
+{ return vbslq_s16(vreinterpretq_u16_s16(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4us pselect(const Packet4us& mask, const Packet4us& a, const Packet4us& b)
+{ return vbsl_u16(mask, a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8us pselect(const Packet8us& mask, const Packet8us& a, const Packet8us& b)
+{ return vbslq_u16(mask, a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2i pselect(const Packet2i& mask, const Packet2i& a, const Packet2i& b)
+{ return vbsl_s32(vreinterpret_u32_s32(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b)
+{ return vbslq_s32(vreinterpretq_u32_s32(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ui pselect(const Packet2ui& mask, const Packet2ui& a, const Packet2ui& b)
+{ return vbsl_u32(mask, a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4ui pselect(const Packet4ui& mask, const Packet4ui& a, const Packet4ui& b)
+{ return vbslq_u32(mask, a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2l pselect(const Packet2l& mask, const Packet2l& a, const Packet2l& b)
+{ return vbslq_s64(vreinterpretq_u64_s64(mask), a, b); }
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ul pselect(const Packet2ul& mask, const Packet2ul& a, const Packet2ul& b)
+{ return vbslq_u64(mask, a, b); }
+
+// Use armv8 rounding intrinsics if available.
+#if EIGEN_ARCH_ARMV8
+template<> EIGEN_STRONG_INLINE Packet2f print<Packet2f>(const Packet2f& a)
+{ return vrndn_f32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f print<Packet4f>(const Packet4f& a)
+{ return vrndnq_f32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet2f pfloor<Packet2f>(const Packet2f& a)
+{ return vrndm_f32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+{ return vrndmq_f32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet2f pceil<Packet2f>(const Packet2f& a)
+{ return vrndp_f32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
+{ return vrndpq_f32(a); }
+
+#else
+
+template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) {
+ // Adds and subtracts signum(a) * 2^23 to force rounding.
+ const Packet4f limit = pset1<Packet4f>(static_cast<float>(1<<23));
+ const Packet4f abs_a = pabs(a);
+ Packet4f r = padd(abs_a, limit);
+ // Keep the compiler from optimizing away the add and subtract.
+ EIGEN_OPTIMIZATION_BARRIER(r);
+ r = psub(r, limit);
+ // If greater than limit, simply return a. Otherwise, account for sign.
+ r = pselect(pcmp_lt(abs_a, limit),
+ pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
+ return r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2f print(const Packet2f& a) {
+ // Adds and subtracts signum(a) * 2^23 to force rounding.
+ const Packet2f limit = pset1<Packet2f>(static_cast<float>(1<<23));
+ const Packet2f abs_a = pabs(a);
+ Packet2f r = padd(abs_a, limit);
+ // Keep the compiler from optimizing away the add and subtract.
+ EIGEN_OPTIMIZATION_BARRIER(r);
+ r = psub(r, limit);
+ // If greater than limit, simply return a. Otherwise, account for sign.
+ r = pselect(pcmp_lt(abs_a, limit),
+ pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
+ return r;
+}
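+// Scalar sketch of the 2^23 trick used by print above (illustrative only; the
+// name is hypothetical, <cmath> is assumed, and volatile stands in for
+// EIGEN_OPTIMIZATION_BARRIER): floats >= 2^23 carry no fractional bits, so
+// adding and subtracting 2^23 rounds |a| to the nearest integer in the
+// default round-to-nearest-even mode.
+inline float print_sketch(float a) {
+ const float limit = 8388608.0f;    // 2^23
+ const float abs_a = std::fabs(a);
+ if (!(abs_a < limit)) return a;    // large values and NaN pass through
+ volatile float r = abs_a + limit;  // forced to round to an integer
+ r = r - limit;
+ return a < 0.0f ? -r : r;
+}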
+
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
{
- int32x2_t a_lo, a_hi, prod;
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
+ Packet4f tmp = print<Packet4f>(a);
+ // If greater, subtract one.
+ Packet4f mask = pcmp_lt(a, tmp);
+ mask = pand(mask, cst_1);
+ return psub(tmp, mask);
+}
- // Get a_lo = |a1|a2| and a_hi = |a3|a4|
- a_lo = vget_low_s32(a);
- a_hi = vget_high_s32(a);
- // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
- prod = vmul_s32(a_lo, a_hi);
- // Multiply prod with its swapped value |a2*a4|a1*a3|
- prod = vmul_s32(prod, vrev64_s32(prod));
+template<> EIGEN_STRONG_INLINE Packet2f pfloor<Packet2f>(const Packet2f& a)
+{
+ const Packet2f cst_1 = pset1<Packet2f>(1.0f);
+ Packet2f tmp = print<Packet2f>(a);
+ // If greater, subtract one.
+ Packet2f mask = pcmp_lt(a, tmp);
+ mask = pand(mask, cst_1);
+ return psub(tmp, mask);
+}
- return vget_lane_s32(prod, 0);
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
+{
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
+ Packet4f tmp = print<Packet4f>(a);
+ // If smaller, add one.
+ Packet4f mask = pcmp_lt(tmp, a);
+ mask = pand(mask, cst_1);
+ return padd(tmp, mask);
}
-// min
-template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+template<> EIGEN_STRONG_INLINE Packet2f pceil<Packet2f>(const Packet2f& a)
+{
+ const Packet2f cst_1 = pset1<Packet2f>(1.0f);
+ Packet2f tmp = print<Packet2f>(a);
+ // If smaller, add one.
+ Packet2f mask = pcmp_lt(tmp, a);
+ mask = pand(mask, cst_1);
+ return padd(tmp, mask);
+}
+
+#endif
+
+/**
+ * Computes the integer square root.
+ * @remarks The calculation iterates over each binary digit of the result,
+ * setting a digit to 1 only if the square of the resulting candidate does
+ * not exceed the argument. The algorithm is described in detail here:
+ * http://ww1.microchip.com/downloads/en/AppNotes/91040a.pdf .
+ */
+template<> EIGEN_STRONG_INLINE Packet4uc psqrt(const Packet4uc& a) {
+ uint8x8_t x = vreinterpret_u8_u32(vdup_n_u32(a));
+ uint8x8_t res = vdup_n_u8(0);
+ uint8x8_t add = vdup_n_u8(0x8);
+ for (int i = 0; i < 4; i++)
+ {
+ const uint8x8_t temp = vorr_u8(res, add);
+ res = vbsl_u8(vcge_u8(x, vmul_u8(temp, temp)), temp, res);
+ add = vshr_n_u8(add, 1);
+ }
+ return vget_lane_u32(vreinterpret_u32_u8(res), 0);
+}
+/// @copydoc Eigen::internal::psqrt(const Packet4uc& a)
+template<> EIGEN_STRONG_INLINE Packet8uc psqrt(const Packet8uc& a) {
+ uint8x8_t res = vdup_n_u8(0);
+ uint8x8_t add = vdup_n_u8(0x8);
+ for (int i = 0; i < 4; i++)
+ {
+ const uint8x8_t temp = vorr_u8(res, add);
+ res = vbsl_u8(vcge_u8(a, vmul_u8(temp, temp)), temp, res);
+ add = vshr_n_u8(add, 1);
+ }
+ return res;
+}
+/// @copydoc Eigen::internal::psqrt(const Packet4uc& a)
+template<> EIGEN_STRONG_INLINE Packet16uc psqrt(const Packet16uc& a) {
+ uint8x16_t res = vdupq_n_u8(0);
+ uint8x16_t add = vdupq_n_u8(0x8);
+ for (int i = 0; i < 4; i++)
+ {
+ const uint8x16_t temp = vorrq_u8(res, add);
+ res = vbslq_u8(vcgeq_u8(a, vmulq_u8(temp, temp)), temp, res);
+ add = vshrq_n_u8(add, 1);
+ }
+ return res;
+}
+/// @copydoc Eigen::internal::psqrt(const Packet4uc& a)
+template<> EIGEN_STRONG_INLINE Packet4us psqrt(const Packet4us& a) {
+ uint16x4_t res = vdup_n_u16(0);
+ uint16x4_t add = vdup_n_u16(0x80);
+ for (int i = 0; i < 8; i++)
+ {
+ const uint16x4_t temp = vorr_u16(res, add);
+ res = vbsl_u16(vcge_u16(a, vmul_u16(temp, temp)), temp, res);
+ add = vshr_n_u16(add, 1);
+ }
+ return res;
+}
+/// @copydoc Eigen::internal::psqrt(const Packet4uc& a)
+template<> EIGEN_STRONG_INLINE Packet8us psqrt(const Packet8us& a) {
+ uint16x8_t res = vdupq_n_u16(0);
+ uint16x8_t add = vdupq_n_u16(0x80);
+ for (int i = 0; i < 8; i++)
+ {
+ const uint16x8_t temp = vorrq_u16(res, add);
+ res = vbslq_u16(vcgeq_u16(a, vmulq_u16(temp, temp)), temp, res);
+ add = vshrq_n_u16(add, 1);
+ }
+ return res;
+}
+/// @copydoc Eigen::internal::psqrt(const Packet4uc& a)
+template<> EIGEN_STRONG_INLINE Packet2ui psqrt(const Packet2ui& a) {
+ uint32x2_t res = vdup_n_u32(0);
+ uint32x2_t add = vdup_n_u32(0x8000);
+ for (int i = 0; i < 16; i++)
+ {
+ const uint32x2_t temp = vorr_u32(res, add);
+ res = vbsl_u32(vcge_u32(a, vmul_u32(temp, temp)), temp, res);
+ add = vshr_n_u32(add, 1);
+ }
+ return res;
+}
+/// @copydoc Eigen::internal::psqrt(const Packet4uc& a)
+template<> EIGEN_STRONG_INLINE Packet4ui psqrt(const Packet4ui& a) {
+ uint32x4_t res = vdupq_n_u32(0);
+ uint32x4_t add = vdupq_n_u32(0x8000);
+ for (int i = 0; i < 16; i++)
+ {
+ const uint32x4_t temp = vorrq_u32(res, add);
+ res = vbslq_u32(vcgeq_u32(a, vmulq_u32(temp, temp)), temp, res);
+ add = vshrq_n_u32(add, 1);
+ }
+ return res;
+}
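+// Scalar sketch of the digit-by-digit square root above (illustrative only;
+// the name is hypothetical and <cstdint> is assumed): tentatively set each
+// result bit from the top down, keeping it whenever the squared candidate
+// still fits under the input.
+inline uint16_t isqrt_u16_sketch(uint16_t x) {
+ uint16_t res = 0;
+ for (uint16_t add = 0x80; add != 0; add >>= 1) {  // 8 result bits, as in
+   const uint16_t temp = res | add;                // the uint16 loops above
+   if (temp * temp <= x) res = temp;  // promoted to int, so no overflow
+ }
+ return res;
+}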
+
+template<> EIGEN_STRONG_INLINE Packet4f prsqrt(const Packet4f& a) {
+ // Compute approximate reciprocal sqrt.
+ Packet4f x = vrsqrteq_f32(a);
+ // Refine with two Newton iterations for 1/sqrt(a).
+ x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, x), x), x);
+ x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, x), x), x);
+ const Packet4f infinity = pset1<Packet4f>(NumTraits<float>::infinity());
+ return pselect(pcmp_eq(a, pzero(a)), infinity, x);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2f prsqrt(const Packet2f& a) {
+ // Compute approximate reciprocal sqrt.
+ Packet2f x = vrsqrte_f32(a);
+ // Refine with two Newton iterations for 1/sqrt(a).
+ x = vmul_f32(vrsqrts_f32(vmul_f32(a, x), x), x);
+ x = vmul_f32(vrsqrts_f32(vmul_f32(a, x), x), x);
+ const Packet2f infinity = pset1<Packet2f>(NumTraits<float>::infinity());
+ return pselect(pcmp_eq(a, pzero(a)), infinity, x);
+}
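+// Derivation of the refinement above: Newton's method on f(x) = 1/x^2 - a
+// gives x_{n+1} = x_n * (3 - a*x_n^2) / 2, and vrsqrts(p, q) computes
+// (3 - p*q) / 2, so each vmul line performs exactly one Newton step.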
+
+// Unfortunately vsqrt_f32 is only available for A64.
+#if EIGEN_ARCH_ARM64
+template<> EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& _x) { return vsqrtq_f32(_x); }
+template<> EIGEN_STRONG_INLINE Packet2f psqrt(const Packet2f& _x) { return vsqrt_f32(_x); }
+#else
+template<> EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) {
+ const Packet4f infinity = pset1<Packet4f>(NumTraits<float>::infinity());
+ const Packet4f is_zero_or_inf = por(pcmp_eq(a, pzero(a)), pcmp_eq(a, infinity));
+ return pselect(is_zero_or_inf, a, pmul(a, prsqrt(a)));
+}
+template<> EIGEN_STRONG_INLINE Packet2f psqrt(const Packet2f& a) {
+ const Packet2f infinity = pset1<Packet2f>(NumTraits<float>::infinity());
+ const Packet2f is_zero_or_inf = por(pcmp_eq(a, pzero(a)), pcmp_eq(a, infinity));
+ return pselect(is_zero_or_inf, a, pmul(a, prsqrt(a)));
+}
+#endif
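+
+// A scalar sketch of the refinement step used by prsqrt above (illustrative;
+// newton_rsqrt_step is a hypothetical helper, not part of Eigen): vrsqrts(p, q)
+// computes (3 - p*q)/2, so each refinement line is x <- x*(3 - a*x*x)/2, one
+// Newton step toward 1/sqrt(a).
+inline float newton_rsqrt_step(float a, float x)
+{
+ return x * (3.0f - a * x * x) * 0.5f; // == x * vrsqrts(a*x, x)
+}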
+
+//---------- bfloat16 ----------
+// TODO: Add support for native armv8.6-a bfloat16_t
+
+// TODO: Guard if we have native bfloat16 support
+typedef eigen_packet_wrapper<uint16x4_t, 19> Packet4bf;
+
+template<> struct is_arithmetic<Packet4bf> { enum { value = true }; };
+
+template<> struct packet_traits<bfloat16> : default_packet_traits
{
- float32x2_t a_lo, a_hi, min;
+ typedef Packet4bf type;
+ typedef Packet4bf half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 0,
- a_lo = vget_low_f32(a);
- a_hi = vget_high_f32(a);
- min = vpmin_f32(a_lo, a_hi);
- min = vpmin_f32(min, min);
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+ HasDiv = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
- return vget_lane_f32(min, 0);
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 0,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasBessel = 0, // Issues with accuracy.
+ HasNdtri = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet4bf>
+{
+ typedef bfloat16 type;
+ typedef Packet4bf half;
+ enum
+ {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+namespace detail {
+template<>
+EIGEN_ALWAYS_INLINE void zip_in_place<Packet4bf>(Packet4bf& p1, Packet4bf& p2) {
+ const uint16x4x2_t tmp = vzip_u16(p1, p2);
+ p1 = tmp.val[0];
+ p2 = tmp.val[1];
}
+} // namespace detail
-template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
+EIGEN_STRONG_INLINE Packet4bf F32ToBf16(const Packet4f& p)
{
- int32x2_t a_lo, a_hi, min;
+ // See the scalar implementation in BFloat16.h for a comprehensible
+ // explanation of this fast rounding algorithm.
+ Packet4ui input = reinterpret_cast<Packet4ui>(p);
- a_lo = vget_low_s32(a);
- a_hi = vget_high_s32(a);
- min = vpmin_s32(a_lo, a_hi);
- min = vpmin_s32(min, min);
-
- return vget_lane_s32(min, 0);
+ // lsb = (input >> 16) & 1
+ Packet4ui lsb = vandq_u32(vshrq_n_u32(input, 16), vdupq_n_u32(1));
+
+ // rounding_bias = 0x7fff + lsb
+ Packet4ui rounding_bias = vaddq_u32(lsb, vdupq_n_u32(0x7fff));
+
+ // input += rounding_bias
+ input = vaddq_u32(input, rounding_bias);
+
+ // input = input >> 16
+ input = vshrq_n_u32(input, 16);
+
+ // Replace float NaNs with the canonical bfloat16 NaN, i.e. 0x7fc0.
+ const Packet4ui bf16_nan = vdupq_n_u32(0x7fc0);
+ const Packet4ui mask = vceqq_f32(p, p);
+ input = vbslq_u32(mask, input, bf16_nan);
+
+ // output = static_cast<uint16_t>(input)
+ return vmovn_u32(input);
}
-// max
-template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+EIGEN_STRONG_INLINE Packet4f Bf16ToF32(const Packet4bf& p)
{
- float32x2_t a_lo, a_hi, max;
+ return reinterpret_cast<Packet4f>(vshlq_n_u32(vmovl_u16(p), 16));
+}
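+
+// A scalar sketch of the lane-wise conversion pair above (illustrative;
+// scalar_f32_to_bf16 is a hypothetical helper, not part of Eigen): adding
+// 0x7fff plus the lsb of the retained half rounds to nearest with ties to
+// even, NaNs collapse to 0x7fc0, and Bf16ToF32 simply shifts the 16 kept
+// bits back into the float's high half.
+inline numext::uint16_t scalar_f32_to_bf16(float f)
+{
+ if (f != f) return 0x7fc0; // canonical bfloat16 NaN
+ numext::uint32_t bits = numext::bit_cast<numext::uint32_t>(f);
+ bits += 0x7fffu + ((bits >> 16) & 1u); // round to nearest, ties to even
+ return static_cast<numext::uint16_t>(bits >> 16);
+}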
- a_lo = vget_low_f32(a);
- a_hi = vget_high_f32(a);
- max = vpmax_f32(a_lo, a_hi);
- max = vpmax_f32(max, max);
+EIGEN_STRONG_INLINE Packet4bf F32MaskToBf16Mask(const Packet4f& p) {
+ return vmovn_u32(vreinterpretq_u32_f32(p));
+}
- return vget_lane_f32(max, 0);
+template<> EIGEN_STRONG_INLINE Packet4bf pset1<Packet4bf>(const bfloat16& from) {
+ return pset1<Packet4us>(from.value);
}
-template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
+template<> EIGEN_STRONG_INLINE bfloat16 pfirst<Packet4bf>(const Packet4bf& from) {
+ return bfloat16_impl::raw_uint16_to_bfloat16(static_cast<uint16_t>(pfirst<Packet4us>(from)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pload<Packet4bf>(const bfloat16* from)
+{
+ return pload<Packet4us>(reinterpret_cast<const uint16_t*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf ploadu<Packet4bf>(const bfloat16* from)
+{
+ return ploadu<Packet4us>(reinterpret_cast<const uint16_t*>(from));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet4bf& from)
+{
+ EIGEN_DEBUG_ALIGNED_STORE vst1_u16(reinterpret_cast<uint16_t*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet4bf& from)
+{
+ EIGEN_DEBUG_UNALIGNED_STORE vst1_u16(reinterpret_cast<uint16_t*>(to), from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf ploaddup<Packet4bf>(const bfloat16* from)
+{
+ return ploaddup<Packet4us>(reinterpret_cast<const uint16_t*>(from));
+}
+
+template <> EIGEN_STRONG_INLINE Packet4bf pabs(const Packet4bf& a) {
+ return F32ToBf16(pabs<Packet4f>(Bf16ToF32(a)));
+}
+
+template <> EIGEN_STRONG_INLINE Packet4bf pmin<PropagateNumbers, Packet4bf>(const Packet4bf &a,
+ const Packet4bf &b)
+{
+ return F32ToBf16(pmin<PropagateNumbers, Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+template <> EIGEN_STRONG_INLINE Packet4bf pmin<PropagateNaN, Packet4bf>(const Packet4bf &a,
+ const Packet4bf &b)
+{
+ return F32ToBf16(pmin<PropagateNaN, Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <> EIGEN_STRONG_INLINE Packet4bf pmin<Packet4bf>(const Packet4bf &a,
+ const Packet4bf &b)
+{
+ return F32ToBf16(pmin<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <> EIGEN_STRONG_INLINE Packet4bf pmax<PropagateNumbers, Packet4bf>(const Packet4bf &a,
+ const Packet4bf &b)
+{
+ return F32ToBf16(pmax<PropagateNumbers, Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+template <> EIGEN_STRONG_INLINE Packet4bf pmax<PropagateNaN, Packet4bf>(const Packet4bf &a,
+ const Packet4bf &b)
+{
+ return F32ToBf16(pmax<PropagateNaN, Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template <> EIGEN_STRONG_INLINE Packet4bf pmax<Packet4bf>(const Packet4bf &a,
+ const Packet4bf &b)
+{
+ return F32ToBf16(pmax<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf plset<Packet4bf>(const bfloat16& a)
+{
+ return F32ToBf16(plset<Packet4f>(static_cast<float>(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf por(const Packet4bf& a,const Packet4bf& b) {
+ return por<Packet4us>(a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pxor(const Packet4bf& a,const Packet4bf& b) {
+ return pxor<Packet4us>(a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pand(const Packet4bf& a,const Packet4bf& b) {
+ return pand<Packet4us>(a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pandnot(const Packet4bf& a,const Packet4bf& b) {
+ return pandnot<Packet4us>(a, b);
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4bf pselect(const Packet4bf& mask, const Packet4bf& a,
+ const Packet4bf& b)
+{
+ return pselect<Packet4us>(mask, a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf print<Packet4bf>(const Packet4bf& a)
+{
+ return F32ToBf16(print<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pfloor<Packet4bf>(const Packet4bf& a)
+{
+ return F32ToBf16(pfloor<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pceil<Packet4bf>(const Packet4bf& a)
+{
+ return F32ToBf16(pceil<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pconj(const Packet4bf& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4bf padd<Packet4bf>(const Packet4bf& a, const Packet4bf& b) {
+ return F32ToBf16(padd<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf psub<Packet4bf>(const Packet4bf& a, const Packet4bf& b) {
+ return F32ToBf16(psub<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pmul<Packet4bf>(const Packet4bf& a, const Packet4bf& b) {
+ return F32ToBf16(pmul<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pdiv<Packet4bf>(const Packet4bf& a, const Packet4bf& b) {
+ return F32ToBf16(pdiv<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<>
+EIGEN_STRONG_INLINE Packet4bf pgather<bfloat16, Packet4bf>(const bfloat16* from, Index stride)
+{
+ return pgather<uint16_t, Packet4us>(reinterpret_cast<const uint16_t*>(from), stride);
+}
+
+template<>
+EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet4bf>(bfloat16* to, const Packet4bf& from, Index stride)
+{
+ pscatter<uint16_t, Packet4us>(reinterpret_cast<uint16_t*>(to), from, stride);
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet4bf>(const Packet4bf& a)
+{
+ return static_cast<bfloat16>(predux<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet4bf>(const Packet4bf& a)
+{
+ return static_cast<bfloat16>(predux_max<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet4bf>(const Packet4bf& a)
+{
+ return static_cast<bfloat16>(predux_min<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet4bf>(const Packet4bf& a)
+{
+ return static_cast<bfloat16>(predux_mul<Packet4f>(Bf16ToF32(a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf preverse<Packet4bf>(const Packet4bf& a)
+{
+ return preverse<Packet4us>(a);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4bf, 4>& kernel)
+{
+ detail::ptranspose_impl(kernel);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pabsdiff<Packet4bf>(const Packet4bf& a, const Packet4bf& b)
{
- int32x2_t a_lo, a_hi, max;
-
- a_lo = vget_low_s32(a);
- a_hi = vget_high_s32(a);
- max = vpmax_s32(a_lo, a_hi);
- max = vpmax_s32(max, max);
-
- return vget_lane_s32(max, 0);
-}
-
-// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
-// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
-#define PALIGN_NEON(Offset,Type,Command) \
-template<>\
-struct palign_impl<Offset,Type>\
-{\
- EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
- {\
- if (Offset!=0)\
- first = Command(first, second, Offset);\
- }\
-};\
-
-PALIGN_NEON(0,Packet4f,vextq_f32)
-PALIGN_NEON(1,Packet4f,vextq_f32)
-PALIGN_NEON(2,Packet4f,vextq_f32)
-PALIGN_NEON(3,Packet4f,vextq_f32)
-PALIGN_NEON(0,Packet4i,vextq_s32)
-PALIGN_NEON(1,Packet4i,vextq_s32)
-PALIGN_NEON(2,Packet4i,vextq_s32)
-PALIGN_NEON(3,Packet4i,vextq_s32)
-
-#undef PALIGN_NEON
-
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet4f,4>& kernel) {
- float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
- float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);
-
- kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]), vget_low_f32(tmp2.val[0]));
- kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
- kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]), vget_low_f32(tmp2.val[1]));
- kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
-}
-
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet4i,4>& kernel) {
- int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
- int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
- kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0]));
- kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
- kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1]));
- kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
+ return F32ToBf16(pabsdiff<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pcmp_eq<Packet4bf>(const Packet4bf& a, const Packet4bf& b)
+{
+ return F32MaskToBf16Mask(pcmp_eq<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pcmp_lt<Packet4bf>(const Packet4bf& a, const Packet4bf& b)
+{
+ return F32MaskToBf16Mask(pcmp_lt<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pcmp_lt_or_nan<Packet4bf>(const Packet4bf& a, const Packet4bf& b)
+{
+ return F32MaskToBf16Mask(pcmp_lt_or_nan<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pcmp_le<Packet4bf>(const Packet4bf& a, const Packet4bf& b)
+{
+ return F32MaskToBf16Mask(pcmp_le<Packet4f>(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pnegate<Packet4bf>(const Packet4bf& a)
+{
+ return pxor<Packet4us>(a, pset1<Packet4us>(static_cast<uint16_t>(0x8000)));
}
//---------- double ----------
@@ -540,55 +3642,115 @@ ptranspose(PacketBlock<Packet4i,4>& kernel) {
// Defining these functions as templates ensures that if these intrinsics are
// already defined in arm_neon.h, then our workaround doesn't cause a conflict
// and has lower priority in overload resolution.
-template <typename T>
-uint64x2_t vreinterpretq_u64_f64(T a)
+template <typename T> uint64x2_t vreinterpretq_u64_f64(T a) { return (uint64x2_t) a; }
+
+template <typename T> float64x2_t vreinterpretq_f64_u64(T a) { return (float64x2_t) a; }
+
+typedef float64x2_t Packet2d;
+typedef float64x1_t Packet1d;
+
+// Functionally equivalent to _mm_shuffle_pd in SSE (i.e. shuffle(m, n, mask) equals _mm_shuffle_pd(m, n, mask)).
+// Currently used in LU/arch/InverseSize4.h to enable a shared implementation
+// for fast inversion of matrices of size 4.
+EIGEN_STRONG_INLINE Packet2d shuffle(const Packet2d& m, const Packet2d& n, int mask)
{
- return (uint64x2_t) a;
+ const double* a = reinterpret_cast<const double*>(&m);
+ const double* b = reinterpret_cast<const double*>(&n);
+ Packet2d res = {*(a + (mask & 1)), *(b + ((mask >> 1) & 1))};
+ return res;
}
-template <typename T>
-float64x2_t vreinterpretq_f64_u64(T a)
+EIGEN_STRONG_INLINE Packet2d vec2d_swizzle2(const Packet2d& a, const Packet2d& b, int mask)
{
- return (float64x2_t) a;
+ return shuffle(a, b, mask);
}
-
-typedef float64x2_t Packet2d;
-typedef float64x1_t Packet1d;
+EIGEN_STRONG_INLINE Packet2d vec2d_unpacklo(const Packet2d& a,const Packet2d& b)
+{
+ return shuffle(a, b, 0);
+}
+EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a,const Packet2d& b)
+{
+ return shuffle(a, b, 3);
+}
+#define vec2d_duplane(a, p) \
+ vdupq_laneq_f64(a, p)
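+
+// Usage sketch (illustrative; shuffle_demo is hypothetical, not part of
+// Eigen): the low mask bit selects the lane taken from m, the next bit the
+// lane taken from n, matching _mm_shuffle_pd's encoding.
+inline void shuffle_demo(const Packet2d& m, const Packet2d& n)
+{
+ const Packet2d lo = shuffle(m, n, 0); // {m[0], n[0]} == vec2d_unpacklo(m, n)
+ const Packet2d hi = shuffle(m, n, 3); // {m[1], n[1]} == vec2d_unpackhi(m, n)
+ EIGEN_UNUSED_VARIABLE(lo);
+ EIGEN_UNUSED_VARIABLE(hi);
+}
+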
template<> struct packet_traits<double> : default_packet_traits
{
typedef Packet2d type;
typedef Packet2d half;
- enum {
+ enum
+ {
Vectorizable = 1,
AlignedOnScalar = 1,
size = 2,
- HasHalfPacket=0,
-
- HasDiv = 1,
- // FIXME check the Has*
+ HasHalfPacket = 0,
+
+ HasCmp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+
+ HasDiv = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+
HasSin = 0,
HasCos = 0,
- HasLog = 0,
- HasExp = 0,
- HasSqrt = 0
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasTanh = 0,
+ HasErf = 0
};
};
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet2d>
+{
+ typedef double type;
+ typedef Packet2d half;
+ typedef Packet2l integer_packet;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
{
- const double countdown_raw[] = {0.0,1.0};
- const Packet2d countdown = vld1q_f64(countdown_raw);
- return vaddq_f64(pset1<Packet2d>(a), countdown);
+ const double c[] = {0.0,1.0};
+ return vaddq_f64(pset1<Packet2d>(a), vld1q_f64(c));
}
+
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& , const Packet2d& );
+template<> EIGEN_STRONG_INLINE Packet2d paddsub<Packet2d>(const Packet2d& a, const Packet2d& b){
+ const Packet2d mask = {numext::bit_cast<double>(0x8000000000000000ull),0.0};
+ return padd(a, pxor(mask, b));
+}
+
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
@@ -599,128 +3761,824 @@ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const
#ifdef __ARM_FEATURE_FMA
// See bug 936. See above comment about FMA for float.
-template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c)
+{ return vfmaq_f64(c,a,b); }
#else
-template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c)
+{ return vmlaq_f64(c,a,b); }
#endif
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }
+#ifdef __ARM_FEATURE_NUMERIC_MAXMIN
+// Numeric max and min are only available if __ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case on Armv8 systems).
+template<> EIGEN_STRONG_INLINE Packet2d pmin<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) { return vminnmq_f64(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxnmq_f64(a, b); }
+
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet2d pmin<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) { return pmin<Packet2d>(a, b); }
+
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pmax<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) { return pmax<Packet2d>(a, b); }
+
// Logical operations are not supported for double, so we have to use reinterpret casts via NEON integer intrinsics
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
- return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
- return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
- return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
- return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
-template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u64(vcleq_f64(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u64(vcltq_f64(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
-{
- return vld1q_dup_f64(from);
-}
-template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u32(vmvnq_u32(vreinterpretq_u32_u64(vcgeq_f64(a,b)))); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u64(vceqq_f64(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }
-template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from) { return vld1q_dup_f64(from); }
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to,from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to,from); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
Packet2d res = pset1<Packet2d>(0.0);
- res = vsetq_lane_f64(from[0*stride], res, 0);
- res = vsetq_lane_f64(from[1*stride], res, 1);
+ res = vld1q_lane_f64(from + 0*stride, res, 0);
+ res = vld1q_lane_f64(from + 1*stride, res, 1);
return res;
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
- to[stride*0] = vgetq_lane_f64(from, 0);
- to[stride*1] = vgetq_lane_f64(from, 1);
+ vst1q_lane_f64(to + stride*0, from, 0);
+ vst1q_lane_f64(to + stride*1, from, 1);
}
+
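+// Usage sketch (illustrative; gather_scatter_demo is hypothetical, not part
+// of Eigen): with stride 3, pgather loads from[0] and from[3] into the two
+// lanes, and pscatter writes them back to the same strided slots.
+inline void gather_scatter_demo(double* buf)
+{
+ const Packet2d p = pgather<double, Packet2d>(buf, 3); // loads buf[0], buf[3]
+ pscatter<double, Packet2d>(buf, p, 3); // stores buf[0], buf[3]
+}
+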
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }
// FIXME: only store the first 2 elements?
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a,0); }
-template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{ return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround ICE, see bug 907
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{ return (vget_low_f64(a) + vget_high_f64(a))[0]; }
#else
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{ return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
#endif
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
- float64x2_t trn1, trn2;
-
- // NEON zip performs interleaving of the supplied vectors.
- // We perform two interleaves in a row to acquire the transposed vector
- trn1 = vzip1q_f64(vecs[0], vecs[1]);
- trn2 = vzip2q_f64(vecs[0], vecs[1]);
-
- // Do the addition of the resulting vectors
- return vaddq_f64(trn1, trn2);
-}
// Other reduction functions:
// mul
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
-template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{ return (vget_low_f64(a) * vget_high_f64(a))[0]; }
#else
-template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{ return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
#endif
// min
-template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{ return vgetq_lane_f64(vpminq_f64(a,a), 0); }
// max
-template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }
-
-// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
-// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
-#define PALIGN_NEON(Offset,Type,Command) \
-template<>\
-struct palign_impl<Offset,Type>\
-{\
- EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
- {\
- if (Offset!=0)\
- first = Command(first, second, Offset);\
- }\
-};\
-
-PALIGN_NEON(0,Packet2d,vextq_f64)
-PALIGN_NEON(1,Packet2d,vextq_f64)
-#undef PALIGN_NEON
-
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet2d,2>& kernel) {
- float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
- float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);
-
- kernel.packet[0] = trn1;
- kernel.packet[1] = trn2;
-}
-#endif // EIGEN_ARCH_ARM64
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{ return vgetq_lane_f64(vpmaxq_f64(a,a), 0); }
+
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet2d, 2>& kernel)
+{
+ const float64x2_t tmp1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
+ const float64x2_t tmp2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);
+
+ kernel.packet[0] = tmp1;
+ kernel.packet[1] = tmp2;
+}
+
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2d pselect( const Packet2d& mask, const Packet2d& a, const Packet2d& b)
+{ return vbslq_f64(vreinterpretq_u64_f64(mask), a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet2d print<Packet2d>(const Packet2d& a)
+{ return vrndnq_f64(a); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
+{ return vrndmq_f64(a); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a)
+{ return vrndpq_f64(a); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent)
+{ return pldexp_generic(a, exponent); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pfrexp<Packet2d>(const Packet2d& a, Packet2d& exponent)
+{ return pfrexp_generic(a,exponent); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(uint64_t from)
+{ return vreinterpretq_f64_u64(vdupq_n_u64(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet2d prsqrt(const Packet2d& a) {
+ // Compute approximate reciprocal sqrt.
+ Packet2d x = vrsqrteq_f64(a);
+ // Do Newton iterations for 1/sqrt(x).
+ x = vmulq_f64(vrsqrtsq_f64(vmulq_f64(a, x), x), x);
+ x = vmulq_f64(vrsqrtsq_f64(vmulq_f64(a, x), x), x);
+ x = vmulq_f64(vrsqrtsq_f64(vmulq_f64(a, x), x), x);
+ const Packet2d infinity = pset1<Packet2d>(NumTraits<double>::infinity());
+ return pselect(pcmp_eq(a, pzero(a)), infinity, x);
+}
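+// Note: assuming vrsqrteq_f64's table-based estimate carries roughly 8
+// significant bits, each Newton step about doubles the correct bits, so
+// three steps (8 -> 16 -> 32 -> 64) cover double's 53-bit mantissa where
+// float needed only two.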
+
+template<> EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& _x){ return vsqrtq_f64(_x); }
+
+// Do we have fp16 types and the supporting NEON intrinsics?
+#if EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC
+typedef float16x4_t Packet4hf;
+typedef float16x8_t Packet8hf;
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+ typedef Packet8hf type;
+ typedef Packet4hf half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 8,
+ HasHalfPacket = 1,
+
+ HasCmp = 1,
+ HasCast = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasAbsDiff = 0,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+ HasInsert = 1,
+ HasReduxp = 1,
+ HasDiv = 1,
+ HasFloor = 1,
+ HasCeil = 1,
+ HasRint = 1,
+ HasSin = 0,
+ HasCos = 0,
+ HasLog = 0,
+ HasExp = 0,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasErf = EIGEN_FAST_MATH,
+ HasBessel = 0, // Issues with accuracy.
+ HasNdtri = 0
+ };
+};
+
+template <>
+struct unpacket_traits<Packet4hf> {
+ typedef Eigen::half type;
+ typedef Packet4hf half;
+ enum {
+ size = 4,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+template <>
+struct unpacket_traits<Packet8hf> {
+ typedef Eigen::half type;
+ typedef Packet4hf half;
+ enum {
+ size = 8,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf predux_half_dowto4<Packet8hf>(const Packet8hf& a) {
+ return vadd_f16(vget_low_f16(a), vget_high_f16(a));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pset1<Packet8hf>(const Eigen::half& from) {
+ return vdupq_n_f16(from.x);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pset1<Packet4hf>(const Eigen::half& from) {
+ return vdup_n_f16(from.x);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf plset<Packet8hf>(const Eigen::half& a) {
+ const float16_t f[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ Packet8hf countdown = vld1q_f16(f);
+ return vaddq_f16(pset1<Packet8hf>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf plset<Packet4hf>(const Eigen::half& a) {
+ const float16_t f[] = {0, 1, 2, 3};
+ Packet4hf countdown = vld1_f16(f);
+ return vadd_f16(pset1<Packet4hf>(a), countdown);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf padd<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vaddq_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf padd<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vadd_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf psub<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vsubq_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf psub<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vsub_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pnegate(const Packet8hf& a) {
+ return vnegq_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pnegate(const Packet4hf& a) {
+ return vneg_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pconj(const Packet8hf& a) {
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pconj(const Packet4hf& a) {
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pmul<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vmulq_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pmul<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vmul_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pdiv<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vdivq_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pdiv<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vdiv_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pmadd(const Packet8hf& a, const Packet8hf& b, const Packet8hf& c) {
+ return vfmaq_f16(c, a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pmadd(const Packet4hf& a, const Packet4hf& b, const Packet4hf& c) {
+ return vfma_f16(c, a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pmin<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vminq_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pmin<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vmin_f16(a, b);
+}
+
+#ifdef __ARM_FEATURE_NUMERIC_MAXMIN
+// Numeric max and min are only available if __ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case on Armv8 systems).
+template<> EIGEN_STRONG_INLINE Packet4hf pmin<PropagateNumbers, Packet4hf>(const Packet4hf& a, const Packet4hf& b) { return vminnm_f16(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8hf pmin<PropagateNumbers, Packet8hf>(const Packet8hf& a, const Packet8hf& b) { return vminnmq_f16(a, b); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4hf pmin<PropagateNaN, Packet4hf>(const Packet4hf& a, const Packet4hf& b) { return pmin<Packet4hf>(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet8hf pmin<PropagateNaN, Packet8hf>(const Packet8hf& a, const Packet8hf& b) { return pmin<Packet8hf>(a, b); }
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pmax<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vmaxq_f16(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pmax<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vmax_f16(a, b);
+}
+
+#ifdef __ARM_FEATURE_NUMERIC_MAXMIN
+// Numeric max and min are only available if __ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case on Armv8 systems).
+template<> EIGEN_STRONG_INLINE Packet4hf pmax<PropagateNumbers, Packet4hf>(const Packet4hf& a, const Packet4hf& b) { return vmaxnm_f16(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8hf pmax<PropagateNumbers, Packet8hf>(const Packet8hf& a, const Packet8hf& b) { return vmaxnmq_f16(a, b); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4hf pmax<PropagateNaN, Packet4hf>(const Packet4hf& a, const Packet4hf& b) { return pmax<Packet4hf>(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet8hf pmax<PropagateNaN, Packet8hf>(const Packet8hf& a, const Packet8hf& b) { return pmax<Packet8hf>(a, b); }
+
+#define EIGEN_MAKE_ARM_FP16_CMP_8(name) \
+ template <> \
+ EIGEN_STRONG_INLINE Packet8hf pcmp_##name(const Packet8hf& a, const Packet8hf& b) { \
+ return vreinterpretq_f16_u16(vc##name##q_f16(a, b)); \
+ }
+
+#define EIGEN_MAKE_ARM_FP16_CMP_4(name) \
+ template <> \
+ EIGEN_STRONG_INLINE Packet4hf pcmp_##name(const Packet4hf& a, const Packet4hf& b) { \
+ return vreinterpret_f16_u16(vc##name##_f16(a, b)); \
+ }
+
+EIGEN_MAKE_ARM_FP16_CMP_8(eq)
+EIGEN_MAKE_ARM_FP16_CMP_8(lt)
+EIGEN_MAKE_ARM_FP16_CMP_8(le)
+
+EIGEN_MAKE_ARM_FP16_CMP_4(eq)
+EIGEN_MAKE_ARM_FP16_CMP_4(lt)
+EIGEN_MAKE_ARM_FP16_CMP_4(le)
+
+#undef EIGEN_MAKE_ARM_FP16_CMP_8
+#undef EIGEN_MAKE_ARM_FP16_CMP_4
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pcmp_lt_or_nan<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vreinterpretq_f16_u16(vmvnq_u16(vcgeq_f16(a, b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pcmp_lt_or_nan<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vreinterpret_f16_u16(vmvn_u16(vcge_f16(a, b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf print<Packet8hf>(const Packet8hf& a)
+{ return vrndnq_f16(a); }
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf print<Packet4hf>(const Packet4hf& a)
+{ return vrndn_f16(a); }
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pfloor<Packet8hf>(const Packet8hf& a)
+{ return vrndmq_f16(a); }
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pfloor<Packet4hf>(const Packet4hf& a)
+{ return vrndm_f16(a); }
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pceil<Packet8hf>(const Packet8hf& a)
+{ return vrndpq_f16(a); }
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pceil<Packet4hf>(const Packet4hf& a)
+{ return vrndp_f16(a); }
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf psqrt<Packet8hf>(const Packet8hf& a) {
+ return vsqrtq_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf psqrt<Packet4hf>(const Packet4hf& a) {
+ return vsqrt_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pand<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vreinterpretq_f16_u16(vandq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pand<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vreinterpret_f16_u16(vand_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf por<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vreinterpretq_f16_u16(vorrq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf por<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vreinterpret_f16_u16(vorr_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pxor<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vreinterpretq_f16_u16(veorq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pxor<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vreinterpret_f16_u16(veor_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pandnot<Packet8hf>(const Packet8hf& a, const Packet8hf& b) {
+ return vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pandnot<Packet4hf>(const Packet4hf& a, const Packet4hf& b) {
+ return vreinterpret_f16_u16(vbic_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pload<Packet8hf>(const Eigen::half* from) {
+ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f16(reinterpret_cast<const float16_t*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pload<Packet4hf>(const Eigen::half* from) {
+ EIGEN_DEBUG_ALIGNED_LOAD return vld1_f16(reinterpret_cast<const float16_t*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf ploadu<Packet8hf>(const Eigen::half* from) {
+ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f16(reinterpret_cast<const float16_t*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf ploadu<Packet4hf>(const Eigen::half* from) {
+ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_f16(reinterpret_cast<const float16_t*>(from));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf ploaddup<Packet8hf>(const Eigen::half* from) {
+ Packet8hf packet;
+ packet[0] = from[0].x;
+ packet[1] = from[0].x;
+ packet[2] = from[1].x;
+ packet[3] = from[1].x;
+ packet[4] = from[2].x;
+ packet[5] = from[2].x;
+ packet[6] = from[3].x;
+ packet[7] = from[3].x;
+ return packet;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf ploaddup<Packet4hf>(const Eigen::half* from) {
+ float16x4_t packet;
+ float16_t* tmp;
+ tmp = (float16_t*)&packet;
+ tmp[0] = from[0].x;
+ tmp[1] = from[0].x;
+ tmp[2] = from[1].x;
+ tmp[3] = from[1].x;
+ return packet;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf ploadquad<Packet8hf>(const Eigen::half* from) {
+ Packet4hf lo, hi;
+ lo = vld1_dup_f16(reinterpret_cast<const float16_t*>(from));
+ hi = vld1_dup_f16(reinterpret_cast<const float16_t*>(from+1));
+ return vcombine_f16(lo, hi);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pinsertfirst(const Packet8hf& a, Eigen::half b) { return vsetq_lane_f16(b.x, a, 0); }
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pinsertfirst(const Packet4hf& a, Eigen::half b) { return vset_lane_f16(b.x, a, 0); }
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pselect(const Packet8hf& mask, const Packet8hf& a, const Packet8hf& b) {
+ return vbslq_f16(vreinterpretq_u16_f16(mask), a, b);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pselect(const Packet4hf& mask, const Packet4hf& a, const Packet4hf& b) {
+ return vbsl_f16(vreinterpret_u16_f16(mask), a, b);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pinsertlast(const Packet8hf& a, Eigen::half b) { return vsetq_lane_f16(b.x, a, 7); }
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pinsertlast(const Packet4hf& a, Eigen::half b) { return vset_lane_f16(b.x, a, 3); }
+
+template <>
+EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8hf& from) {
+ EIGEN_DEBUG_ALIGNED_STORE vst1q_f16(reinterpret_cast<float16_t*>(to), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4hf& from) {
+ EIGEN_DEBUG_ALIGNED_STORE vst1_f16(reinterpret_cast<float16_t*>(to), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8hf& from) {
+ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f16(reinterpret_cast<float16_t*>(to), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4hf& from) {
+ EIGEN_DEBUG_UNALIGNED_STORE vst1_f16(reinterpret_cast<float16_t*>(to), from);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pgather<Eigen::half, Packet8hf>(const Eigen::half* from, Index stride) {
+ Packet8hf res = pset1<Packet8hf>(Eigen::half(0.f));
+ res = vsetq_lane_f16(from[0 * stride].x, res, 0);
+ res = vsetq_lane_f16(from[1 * stride].x, res, 1);
+ res = vsetq_lane_f16(from[2 * stride].x, res, 2);
+ res = vsetq_lane_f16(from[3 * stride].x, res, 3);
+ res = vsetq_lane_f16(from[4 * stride].x, res, 4);
+ res = vsetq_lane_f16(from[5 * stride].x, res, 5);
+ res = vsetq_lane_f16(from[6 * stride].x, res, 6);
+ res = vsetq_lane_f16(from[7 * stride].x, res, 7);
+ return res;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pgather<Eigen::half, Packet4hf>(const Eigen::half* from, Index stride) {
+ Packet4hf res = pset1<Packet4hf>(Eigen::half(0.f));
+ res = vset_lane_f16(from[0 * stride].x, res, 0);
+ res = vset_lane_f16(from[1 * stride].x, res, 1);
+ res = vset_lane_f16(from[2 * stride].x, res, 2);
+ res = vset_lane_f16(from[3 * stride].x, res, 3);
+ return res;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8hf>(Eigen::half* to, const Packet8hf& from, Index stride) {
+ to[stride * 0].x = vgetq_lane_f16(from, 0);
+ to[stride * 1].x = vgetq_lane_f16(from, 1);
+ to[stride * 2].x = vgetq_lane_f16(from, 2);
+ to[stride * 3].x = vgetq_lane_f16(from, 3);
+ to[stride * 4].x = vgetq_lane_f16(from, 4);
+ to[stride * 5].x = vgetq_lane_f16(from, 5);
+ to[stride * 6].x = vgetq_lane_f16(from, 6);
+ to[stride * 7].x = vgetq_lane_f16(from, 7);
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4hf>(Eigen::half* to, const Packet4hf& from, Index stride) {
+ to[stride * 0].x = vget_lane_f16(from, 0);
+ to[stride * 1].x = vget_lane_f16(from, 1);
+ to[stride * 2].x = vget_lane_f16(from, 2);
+ to[stride * 3].x = vget_lane_f16(from, 3);
+}
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<Eigen::half>(const Eigen::half* addr) {
+ EIGEN_ARM_PREFETCH(addr);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8hf>(const Packet8hf& a) {
+ float16_t x[8];
+ vst1q_f16(x, a);
+ Eigen::half h;
+ h.x = x[0];
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4hf>(const Packet4hf& a) {
+ float16_t x[4];
+ vst1_f16(x, a);
+ Eigen::half h;
+ h.x = x[0];
+ return h;
+}
+
+template<> EIGEN_STRONG_INLINE Packet8hf preverse(const Packet8hf& a) {
+ float16x4_t a_lo, a_hi;
+ Packet8hf a_r64;
+
+ a_r64 = vrev64q_f16(a);
+ a_lo = vget_low_f16(a_r64);
+ a_hi = vget_high_f16(a_r64);
+ return vcombine_f16(a_hi, a_lo);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf preverse<Packet4hf>(const Packet4hf& a) {
+ return vrev64_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet8hf pabs<Packet8hf>(const Packet8hf& a) {
+ return vabsq_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4hf pabs<Packet4hf>(const Packet4hf& a) {
+ return vabs_f16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux<Packet8hf>(const Packet8hf& a) {
+ float16x4_t a_lo, a_hi, sum;
+
+ a_lo = vget_low_f16(a);
+ a_hi = vget_high_f16(a);
+ sum = vpadd_f16(a_lo, a_hi);
+ sum = vpadd_f16(sum, sum);
+ sum = vpadd_f16(sum, sum);
+
+ Eigen::half h;
+ h.x = vget_lane_f16(sum, 0);
+ return h;
+}
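+// Reduction sketch (illustrative): each vpadd_f16 above sums adjacent lanes,
+// folding 8 lanes into 4 partial sums, then 2, then a total replicated across
+// the vector, so lane 0 holds the full sum.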
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux<Packet4hf>(const Packet4hf& a) {
+ float16x4_t sum;
+
+ sum = vpadd_f16(a, a);
+ sum = vpadd_f16(sum, sum);
+ Eigen::half h;
+ h.x = vget_lane_f16(sum, 0);
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8hf>(const Packet8hf& a) {
+ float16x4_t a_lo, a_hi, prod;
+
+ a_lo = vget_low_f16(a);
+ a_hi = vget_high_f16(a);
+ prod = vmul_f16(a_lo, a_hi);
+ prod = vmul_f16(prod, vrev64_f16(prod));
+
+ Eigen::half h;
+ h.x = vmulh_f16(vget_lane_f16(prod, 0), vget_lane_f16(prod, 1));
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet4hf>(const Packet4hf& a) {
+ float16x4_t prod;
+ prod = vmul_f16(a, vrev64_f16(a));
+ Eigen::half h;
+ h.x = vmulh_f16(vget_lane_f16(prod, 0), vget_lane_f16(prod, 1));
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8hf>(const Packet8hf& a) {
+ float16x4_t a_lo, a_hi, min;
+
+ a_lo = vget_low_f16(a);
+ a_hi = vget_high_f16(a);
+ min = vpmin_f16(a_lo, a_hi);
+ min = vpmin_f16(min, min);
+ min = vpmin_f16(min, min);
+
+ Eigen::half h;
+ h.x = vget_lane_f16(min, 0);
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_min<Packet4hf>(const Packet4hf& a) {
+ Packet4hf tmp;
+ tmp = vpmin_f16(a, a);
+ tmp = vpmin_f16(tmp, tmp);
+ Eigen::half h;
+ h.x = vget_lane_f16(tmp, 0);
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8hf>(const Packet8hf& a) {
+ float16x4_t a_lo, a_hi, max;
+
+ a_lo = vget_low_f16(a);
+ a_hi = vget_high_f16(a);
+ max = vpmax_f16(a_lo, a_hi);
+ max = vpmax_f16(max, max);
+ max = vpmax_f16(max, max);
+
+ Eigen::half h;
+ h.x = vget_lane_f16(max, 0);
+ return h;
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_max<Packet4hf>(const Packet4hf& a) {
+ Packet4hf tmp;
+ tmp = vpmax_f16(a, a);
+ tmp = vpmax_f16(tmp, tmp);
+ Eigen::half h;
+ h.x = vget_lane_f16(tmp, 0);
+ return h;
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8hf, 4>& kernel)
+{
+ const float16x8x2_t zip16_1 = vzipq_f16(kernel.packet[0], kernel.packet[1]);
+ const float16x8x2_t zip16_2 = vzipq_f16(kernel.packet[2], kernel.packet[3]);
+
+ const float32x4x2_t zip32_1 = vzipq_f32(vreinterpretq_f32_f16(zip16_1.val[0]), vreinterpretq_f32_f16(zip16_2.val[0]));
+ const float32x4x2_t zip32_2 = vzipq_f32(vreinterpretq_f32_f16(zip16_1.val[1]), vreinterpretq_f32_f16(zip16_2.val[1]));
+
+ kernel.packet[0] = vreinterpretq_f16_f32(zip32_1.val[0]);
+ kernel.packet[1] = vreinterpretq_f16_f32(zip32_1.val[1]);
+ kernel.packet[2] = vreinterpretq_f16_f32(zip32_2.val[0]);
+ kernel.packet[3] = vreinterpretq_f16_f32(zip32_2.val[1]);
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4hf, 4>& kernel) {
+ EIGEN_ALIGN16 float16x4x4_t tmp_x4;
+ float16_t* tmp = (float16_t*)&kernel;
+ tmp_x4 = vld4_f16(tmp);
+
+ kernel.packet[0] = tmp_x4.val[0];
+ kernel.packet[1] = tmp_x4.val[1];
+ kernel.packet[2] = tmp_x4.val[2];
+ kernel.packet[3] = tmp_x4.val[3];
+}
+
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8hf, 8>& kernel) {
+ float16x8x2_t T_1[4];
+
+ T_1[0] = vuzpq_f16(kernel.packet[0], kernel.packet[1]);
+ T_1[1] = vuzpq_f16(kernel.packet[2], kernel.packet[3]);
+ T_1[2] = vuzpq_f16(kernel.packet[4], kernel.packet[5]);
+ T_1[3] = vuzpq_f16(kernel.packet[6], kernel.packet[7]);
+
+ float16x8x2_t T_2[4];
+ T_2[0] = vuzpq_f16(T_1[0].val[0], T_1[1].val[0]);
+ T_2[1] = vuzpq_f16(T_1[0].val[1], T_1[1].val[1]);
+ T_2[2] = vuzpq_f16(T_1[2].val[0], T_1[3].val[0]);
+ T_2[3] = vuzpq_f16(T_1[2].val[1], T_1[3].val[1]);
+
+ float16x8x2_t T_3[4];
+ T_3[0] = vuzpq_f16(T_2[0].val[0], T_2[2].val[0]);
+ T_3[1] = vuzpq_f16(T_2[0].val[1], T_2[2].val[1]);
+ T_3[2] = vuzpq_f16(T_2[1].val[0], T_2[3].val[0]);
+ T_3[3] = vuzpq_f16(T_2[1].val[1], T_2[3].val[1]);
+
+ kernel.packet[0] = T_3[0].val[0];
+ kernel.packet[1] = T_3[2].val[0];
+ kernel.packet[2] = T_3[1].val[0];
+ kernel.packet[3] = T_3[3].val[0];
+ kernel.packet[4] = T_3[0].val[1];
+ kernel.packet[5] = T_3[2].val[1];
+ kernel.packet[6] = T_3[1].val[1];
+ kernel.packet[7] = T_3[3].val[1];
+}
+#endif // end EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC
+
+#endif // EIGEN_ARCH_ARM64
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/TypeCasting.h
new file mode 100644
index 000000000..54f97336e
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/NEON/TypeCasting.h
@@ -0,0 +1,1419 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2018 Rasmus Munk Larsen <rmlarsen@google.com>
+// Copyright (C) 2020 Antonio Sanchez <cantonios@google.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_NEON_H
+#define EIGEN_TYPE_CASTING_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+//==============================================================================
+// pcast, SrcType = float
+//==============================================================================
+template <>
+struct type_casting_traits<float, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet4f, Packet4f>(const Packet4f& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet2f, Packet2f>(const Packet2f& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<float, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+struct type_casting_traits<float, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+// If float64 exists, first convert to that to keep as much precision as possible.
+#if EIGEN_ARCH_ARM64
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet4f, Packet2l>(const Packet4f& a) {
+ // Discard second half of input.
+ return vcvtq_s64_f64(vcvt_f64_f32(vget_low_f32(a)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet4f, Packet2ul>(const Packet4f& a) {
+ // Discard second half of input.
+ return vcvtq_u64_f64(vcvt_f64_f32(vget_low_f32(a)));
+}
+#else
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet4f, Packet2l>(const Packet4f& a) {
+ // Discard second half of input.
+ return vmovl_s32(vget_low_s32(vcvtq_s32_f32(a)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet4f, Packet2ul>(const Packet4f& a) {
+ // Discard second half of input.
+ return vmovl_u32(vget_low_u32(vcvtq_u32_f32(a)));
+}
+#endif // EIGEN_ARCH_ARM64
+
+template <>
+struct type_casting_traits<float, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return vcvtq_s32_f32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet2f, Packet2i>(const Packet2f& a) {
+ return vcvt_s32_f32(a);
+}
+
+template <>
+struct type_casting_traits<float, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet4f, Packet4ui>(const Packet4f& a) {
+ return vcvtq_u32_f32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet2f, Packet2ui>(const Packet2f& a) {
+ return vcvt_u32_f32(a);
+}
+
+template <>
+struct type_casting_traits<float, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet4f, Packet8s>(const Packet4f& a, const Packet4f& b) {
+ return vcombine_s16(vmovn_s32(vcvtq_s32_f32(a)), vmovn_s32(vcvtq_s32_f32(b)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet2f, Packet4s>(const Packet2f& a, const Packet2f& b) {
+ return vmovn_s32(vcombine_s32(vcvt_s32_f32(a), vcvt_s32_f32(b)));
+}
+
+template <>
+struct type_casting_traits<float, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet4f, Packet8us>(const Packet4f& a, const Packet4f& b) {
+ return vcombine_u16(vmovn_u32(vcvtq_u32_f32(a)), vmovn_u32(vcvtq_u32_f32(b)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet2f, Packet4us>(const Packet2f& a, const Packet2f& b) {
+ return vmovn_u32(vcombine_u32(vcvt_u32_f32(a), vcvt_u32_f32(b)));
+}
+
+template <>
+struct type_casting_traits<float, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet4f, Packet16c>(const Packet4f& a, const Packet4f& b, const Packet4f& c,
+ const Packet4f& d) {
+ const int16x8_t ab_s16 = pcast<Packet4f, Packet8s>(a, b);
+ const int16x8_t cd_s16 = pcast<Packet4f, Packet8s>(c, d);
+ return vcombine_s8(vmovn_s16(ab_s16), vmovn_s16(cd_s16));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet2f, Packet8c>(const Packet2f& a, const Packet2f& b, const Packet2f& c,
+ const Packet2f& d) {
+ const int16x4_t ab_s16 = pcast<Packet2f, Packet4s>(a, b);
+ const int16x4_t cd_s16 = pcast<Packet2f, Packet4s>(c, d);
+ return vmovn_s16(vcombine_s16(ab_s16, cd_s16));
+}
+
+template <>
+struct type_casting_traits<float, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet4f, Packet16uc>(const Packet4f& a, const Packet4f& b, const Packet4f& c,
+ const Packet4f& d) {
+ const uint16x8_t ab_u16 = pcast<Packet4f, Packet8us>(a, b);
+ const uint16x8_t cd_u16 = pcast<Packet4f, Packet8us>(c, d);
+ return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet2f, Packet8uc>(const Packet2f& a, const Packet2f& b, const Packet2f& c,
+ const Packet2f& d) {
+ const uint16x4_t ab_u16 = pcast<Packet2f, Packet4us>(a, b);
+ const uint16x4_t cd_u16 = pcast<Packet2f, Packet4us>(c, d);
+ return vmovn_u16(vcombine_u16(ab_u16, cd_u16));
+}
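+
+// A sketch of how the ratios above drive pcast's arity (illustrative;
+// cast_arity_demo is hypothetical, not part of Eigen): SrcCoeffRatio source
+// packets are consumed per target packet, so the 4:1 float-to-int8 cast
+// narrows four Packet4f into one Packet16c.
+inline void cast_arity_demo(const Packet4f& a, const Packet4f& b,
+ const Packet4f& c, const Packet4f& d)
+{
+ const Packet4i to_i32 = pcast<Packet4f, Packet4i>(a); // 1:1
+ const Packet8s to_i16 = pcast<Packet4f, Packet8s>(a, b); // 2:1
+ const Packet16c to_i8 = pcast<Packet4f, Packet16c>(a, b, c, d); // 4:1
+ EIGEN_UNUSED_VARIABLE(to_i32);
+ EIGEN_UNUSED_VARIABLE(to_i16);
+ EIGEN_UNUSED_VARIABLE(to_i8);
+}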
+
+//==============================================================================
+// pcast, SrcType = int8_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::int8_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet16c, Packet4f>(const Packet16c& a) {
+ // Discard all but first 4 bytes.
+ return vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet8c, Packet2f>(const Packet8c& a) {
+ // Discard all but first 2 bytes.
+ return vcvt_f32_s32(vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(a)))));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet16c, Packet2l>(const Packet16c& a) {
+ // Discard all but first two bytes.
+ return vmovl_s32(vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a))))));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet16c, Packet2ul>(const Packet16c& a) {
+ return vreinterpretq_u64_s64(pcast<Packet16c, Packet2l>(a));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet16c, Packet4i>(const Packet16c& a) {
+ // Discard all but first 4 bytes.
+ return vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a))));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet8c, Packet2i>(const Packet8c& a) {
+ // Discard all but first 2 bytes.
+ return vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(a))));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet16c, Packet4ui>(const Packet16c& a) {
+ return vreinterpretq_u32_s32(pcast<Packet16c, Packet4i>(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet8c, Packet2ui>(const Packet8c& a) {
+ return vreinterpret_u32_s32(pcast<Packet8c, Packet2i>(a));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet16c, Packet8s>(const Packet16c& a) {
+ // Discard second half of input.
+ return vmovl_s8(vget_low_s8(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet8c, Packet4s>(const Packet8c& a) {
+ // Discard second half of input.
+ return vget_low_s16(vmovl_s8(a));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet16c, Packet8us>(const Packet16c& a) {
+ return vreinterpretq_u16_s16(pcast<Packet16c, Packet8s>(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet8c, Packet4us>(const Packet8c& a) {
+ return vreinterpret_u16_s16(pcast<Packet8c, Packet4s>(a));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet16c, Packet16c>(const Packet16c& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet8c, Packet8c>(const Packet8c& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet4c pcast<Packet4c, Packet4c>(const Packet4c& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet16c, Packet16uc>(const Packet16c& a) {
+ return vreinterpretq_u8_s8(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet8c, Packet8uc>(const Packet8c& a) {
+ return vreinterpret_u8_s8(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4uc pcast<Packet4c, Packet4uc>(const Packet4c& a) {
+ return static_cast<Packet4uc>(a);
+}
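+// Packet4c/Packet4uc are plain 32-bit scalars holding 4 packed bytes (NEON has
+// no 4-byte vector type), so a static_cast stands in for vreinterpret here.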
+
+//==============================================================================
+// pcast, SrcType = uint8_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::uint8_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet16uc, Packet4f>(const Packet16uc& a) {
+ // Discard all but first 4 bytes.
+ return vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a)))));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet8uc, Packet2f>(const Packet8uc& a) {
+ // Discard all but first 2 bytes.
+ return vcvt_f32_u32(vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(a)))));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet16uc, Packet2ul>(const Packet16uc& a) {
+ // Discard all but first two bytes.
+ return vmovl_u32(vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet16uc, Packet2l>(const Packet16uc& a) {
+ return vreinterpretq_s64_u64(pcast<Packet16uc, Packet2ul>(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet16uc, Packet4ui>(const Packet16uc& a) {
+ // Discard all but first 4 bytes.
+ return vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet8uc, Packet2ui>(const Packet8uc& a) {
+ // Discard all but first 2 bytes.
+ return vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(a))));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet16uc, Packet4i>(const Packet16uc& a) {
+ return vreinterpretq_s32_u32(pcast<Packet16uc, Packet4ui>(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet8uc, Packet2i>(const Packet8uc& a) {
+ return vreinterpret_s32_u32(pcast<Packet8uc, Packet2ui>(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet16uc, Packet8us>(const Packet16uc& a) {
+ // Discard second half of input.
+ return vmovl_u8(vget_low_u8(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet8uc, Packet4us>(const Packet8uc& a) {
+ // Discard second half of input.
+ return vget_low_u16(vmovl_u8(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet16uc, Packet8s>(const Packet16uc& a) {
+ return vreinterpretq_s16_u16(pcast<Packet16uc, Packet8us>(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet8uc, Packet4s>(const Packet8uc& a) {
+ return vreinterpret_s16_u16(pcast<Packet8uc, Packet4us>(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet16uc, Packet16uc>(const Packet16uc& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet8uc, Packet8uc>(const Packet8uc& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet4uc pcast<Packet4uc, Packet4uc>(const Packet4uc& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet16uc, Packet16c>(const Packet16uc& a) {
+ return vreinterpretq_s8_u8(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet8uc, Packet8c>(const Packet8uc& a) {
+ return vreinterpret_s8_u8(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4c pcast<Packet4uc, Packet4c>(const Packet4uc& a) {
+ return static_cast<Packet4c>(a);
+}
+
+//==============================================================================
+// pcast, SrcType = int16_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::int16_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet8s, Packet4f>(const Packet8s& a) {
+ // Discard second half of input.
+ return vcvtq_f32_s32(vmovl_s16(vget_low_s16(a)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet4s, Packet2f>(const Packet4s& a) {
+ // Discard second half of input.
+ return vcvt_f32_s32(vget_low_s32(vmovl_s16(a)));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet8s, Packet2l>(const Packet8s& a) {
+ // Discard all but first two values.
+ return vmovl_s32(vget_low_s32(vmovl_s16(vget_low_s16(a))));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet8s, Packet2ul>(const Packet8s& a) {
+ return vreinterpretq_u64_s64(pcast<Packet8s, Packet2l>(a));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet8s, Packet4i>(const Packet8s& a) {
+ // Discard second half of input.
+ return vmovl_s16(vget_low_s16(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet4s, Packet2i>(const Packet4s& a) {
+ // Discard second half of input.
+ return vget_low_s32(vmovl_s16(a));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet8s, Packet4ui>(const Packet8s& a) {
+ return vreinterpretq_u32_s32(pcast<Packet8s, Packet4i>(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet4s, Packet2ui>(const Packet4s& a) {
+ return vreinterpret_u32_s32(pcast<Packet4s, Packet2i>(a));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet8s, Packet8s>(const Packet8s& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet4s, Packet4s>(const Packet4s& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet8s, Packet8us>(const Packet8s& a) {
+ return vreinterpretq_u16_s16(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet4s, Packet4us>(const Packet4s& a) {
+ return vreinterpret_u16_s16(a);
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet8s, Packet16c>(const Packet8s& a, const Packet8s& b) {
+ return vcombine_s8(vmovn_s16(a), vmovn_s16(b));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet4s, Packet8c>(const Packet4s& a, const Packet4s& b) {
+ return vmovn_s16(vcombine_s16(a, b));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet8s, Packet16uc>(const Packet8s& a, const Packet8s& b) {
+ return vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(a)), vmovn_u16(vreinterpretq_u16_s16(b)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet4s, Packet8uc>(const Packet4s& a, const Packet4s& b) {
+ return vmovn_u16(vcombine_u16(vreinterpret_u16_s16(a), vreinterpret_u16_s16(b)));
+}
+
+//==============================================================================
+// pcast, SrcType = uint16_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::uint16_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet8us, Packet4f>(const Packet8us& a) {
+ // Discard second half of input.
+ return vcvtq_f32_u32(vmovl_u16(vget_low_u16(a)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet4us, Packet2f>(const Packet4us& a) {
+ // Discard second half of input.
+ return vcvt_f32_u32(vget_low_u32(vmovl_u16(a)));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet8us, Packet2ul>(const Packet8us& a) {
+ // Discard all but first two values.
+ return vmovl_u32(vget_low_u32(vmovl_u16(vget_low_u16(a))));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet8us, Packet2l>(const Packet8us& a) {
+ return vreinterpretq_s64_u64(pcast<Packet8us, Packet2ul>(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet8us, Packet4ui>(const Packet8us& a) {
+ // Discard second half of input.
+ return vmovl_u16(vget_low_u16(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet4us, Packet2ui>(const Packet4us& a) {
+ // Discard second half of input.
+ return vget_low_u32(vmovl_u16(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet8us, Packet4i>(const Packet8us& a) {
+ return vreinterpretq_s32_u32(pcast<Packet8us, Packet4ui>(a));
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet4us, Packet2i>(const Packet4us& a) {
+ return vreinterpret_s32_u32(pcast<Packet4us, Packet2ui>(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet8us, Packet8us>(const Packet8us& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet4us, Packet4us>(const Packet4us& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet8us, Packet8s>(const Packet8us& a) {
+ return vreinterpretq_s16_u16(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet4us, Packet4s>(const Packet4us& a) {
+ return vreinterpret_s16_u16(a);
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet8us, Packet16uc>(const Packet8us& a, const Packet8us& b) {
+ return vcombine_u8(vmovn_u16(a), vmovn_u16(b));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet4us, Packet8uc>(const Packet4us& a, const Packet4us& b) {
+ return vmovn_u16(vcombine_u16(a, b));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet8us, Packet16c>(const Packet8us& a, const Packet8us& b) {
+ return vreinterpretq_s8_u8(pcast<Packet8us, Packet16uc>(a, b));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet4us, Packet8c>(const Packet4us& a, const Packet4us& b) {
+ return vreinterpret_s8_u8(pcast<Packet4us, Packet8uc>(a, b));
+}
+
+//==============================================================================
+// pcast, SrcType = int32_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::int32_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
+ return vcvtq_f32_s32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet2i, Packet2f>(const Packet2i& a) {
+ return vcvt_f32_s32(a);
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet4i, Packet2l>(const Packet4i& a) {
+ // Discard second half of input.
+ return vmovl_s32(vget_low_s32(a));
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet4i, Packet2ul>(const Packet4i& a) {
+ return vreinterpretq_u64_s64(pcast<Packet4i, Packet2l>(a));
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet4i, Packet4i>(const Packet4i& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet2i, Packet2i>(const Packet2i& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet4i, Packet4ui>(const Packet4i& a) {
+ return vreinterpretq_u32_s32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet2i, Packet2ui>(const Packet2i& a) {
+ return vreinterpret_u32_s32(a);
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet4i, Packet8s>(const Packet4i& a, const Packet4i& b) {
+ return vcombine_s16(vmovn_s32(a), vmovn_s32(b));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet2i, Packet4s>(const Packet2i& a, const Packet2i& b) {
+ return vmovn_s32(vcombine_s32(a, b));
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet4i, Packet8us>(const Packet4i& a, const Packet4i& b) {
+ return vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(a)), vmovn_u32(vreinterpretq_u32_s32(b)));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet2i, Packet4us>(const Packet2i& a, const Packet2i& b) {
+ return vmovn_u32(vreinterpretq_u32_s32(vcombine_s32(a, b)));
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet4i, Packet16c>(const Packet4i& a, const Packet4i& b, const Packet4i& c,
+ const Packet4i& d) {
+ const int16x8_t ab_s16 = pcast<Packet4i, Packet8s>(a, b);
+ const int16x8_t cd_s16 = pcast<Packet4i, Packet8s>(c, d);
+ return vcombine_s8(vmovn_s16(ab_s16), vmovn_s16(cd_s16));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet2i, Packet8c>(const Packet2i& a, const Packet2i& b, const Packet2i& c,
+ const Packet2i& d) {
+ const int16x4_t ab_s16 = vmovn_s32(vcombine_s32(a, b));
+ const int16x4_t cd_s16 = vmovn_s32(vcombine_s32(c, d));
+ return vmovn_s16(vcombine_s16(ab_s16, cd_s16));
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet4i, Packet16uc>(const Packet4i& a, const Packet4i& b, const Packet4i& c,
+ const Packet4i& d) {
+ const uint16x8_t ab_u16 = pcast<Packet4i, Packet8us>(a, b);
+ const uint16x8_t cd_u16 = pcast<Packet4i, Packet8us>(c, d);
+ return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet2i, Packet8uc>(const Packet2i& a, const Packet2i& b, const Packet2i& c,
+ const Packet2i& d) {
+ const uint16x4_t ab_u16 = pcast<Packet2i, Packet4us>(a, b);
+ const uint16x4_t cd_u16 = pcast<Packet2i, Packet4us>(c, d);
+ return vmovn_u16(vcombine_u16(ab_u16, cd_u16));
+}
+
+//==============================================================================
+// pcast, SrcType = uint32_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::uint32_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet4ui, Packet4f>(const Packet4ui& a) {
+ return vcvtq_f32_u32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f pcast<Packet2ui, Packet2f>(const Packet2ui& a) {
+ return vcvt_f32_u32(a);
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet4ui, Packet2ul>(const Packet4ui& a) {
+ // Discard second half of input.
+ return vmovl_u32(vget_low_u32(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet4ui, Packet2l>(const Packet4ui& a) {
+ return vreinterpretq_s64_u64(pcast<Packet4ui, Packet2ul>(a));
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet4ui, Packet4ui>(const Packet4ui& a) {
+ return a;
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui pcast<Packet2ui, Packet2ui>(const Packet2ui& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet4ui, Packet4i>(const Packet4ui& a) {
+ return vreinterpretq_s32_u32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i pcast<Packet2ui, Packet2i>(const Packet2ui& a) {
+ return vreinterpret_s32_u32(a);
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet4ui, Packet8us>(const Packet4ui& a, const Packet4ui& b) {
+ return vcombine_u16(vmovn_u32(a), vmovn_u32(b));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4us pcast<Packet2ui, Packet4us>(const Packet2ui& a, const Packet2ui& b) {
+ return vmovn_u32(vcombine_u32(a, b));
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet4ui, Packet8s>(const Packet4ui& a, const Packet4ui& b) {
+ return vreinterpretq_s16_u16(pcast<Packet4ui, Packet8us>(a, b));
+}
+template <>
+EIGEN_STRONG_INLINE Packet4s pcast<Packet2ui, Packet4s>(const Packet2ui& a, const Packet2ui& b) {
+ return vreinterpret_s16_u16(pcast<Packet2ui, Packet4us>(a, b));
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet4ui, Packet16uc>(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c,
+ const Packet4ui& d) {
+ const uint16x8_t ab_u16 = vcombine_u16(vmovn_u32(a), vmovn_u32(b));
+ const uint16x8_t cd_u16 = vcombine_u16(vmovn_u32(c), vmovn_u32(d));
+ return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc pcast<Packet2ui, Packet8uc>(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c,
+ const Packet2ui& d) {
+ const uint16x4_t ab_u16 = vmovn_u32(vcombine_u32(a, b));
+ const uint16x4_t cd_u16 = vmovn_u32(vcombine_u32(c, d));
+ return vmovn_u16(vcombine_u16(ab_u16, cd_u16));
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet4ui, Packet16c>(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c,
+ const Packet4ui& d) {
+ return vreinterpretq_s8_u8(pcast<Packet4ui, Packet16uc>(a, b, c, d));
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c pcast<Packet2ui, Packet8c>(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c,
+ const Packet2ui& d) {
+ return vreinterpret_s8_u8(pcast<Packet2ui, Packet8uc>(a, b, c, d));
+}
+
+//==============================================================================
+// pcast, SrcType = int64_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::int64_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet2l, Packet4f>(const Packet2l& a, const Packet2l& b) {
+ return vcvtq_f32_s32(vcombine_s32(vmovn_s64(a), vmovn_s64(b)));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet2l, Packet2l>(const Packet2l& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet2l, Packet2ul>(const Packet2l& a) {
+ return vreinterpretq_u64_s64(a);
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet2l, Packet4i>(const Packet2l& a, const Packet2l& b) {
+ return vcombine_s32(vmovn_s64(a), vmovn_s64(b));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet2l, Packet4ui>(const Packet2l& a, const Packet2l& b) {
+ return vcombine_u32(vmovn_u64(vreinterpretq_u64_s64(a)), vmovn_u64(vreinterpretq_u64_s64(b)));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet2l, Packet8s>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
+ const Packet2l& d) {
+ const int32x4_t ab_s32 = pcast<Packet2l, Packet4i>(a, b);
+ const int32x4_t cd_s32 = pcast<Packet2l, Packet4i>(c, d);
+ return vcombine_s16(vmovn_s32(ab_s32), vmovn_s32(cd_s32));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet2l, Packet8us>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
+ const Packet2l& d) {
+ const uint32x4_t ab_u32 = pcast<Packet2l, Packet4ui>(a, b);
+ const uint32x4_t cd_u32 = pcast<Packet2l, Packet4ui>(c, d);
+ return vcombine_u16(vmovn_u32(ab_u32), vmovn_u32(cd_u32));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet2l, Packet16c>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
+ const Packet2l& d, const Packet2l& e, const Packet2l& f,
+ const Packet2l& g, const Packet2l& h) {
+ const int16x8_t abcd_s16 = pcast<Packet2l, Packet8s>(a, b, c, d);
+ const int16x8_t efgh_s16 = pcast<Packet2l, Packet8s>(e, f, g, h);
+ return vcombine_s8(vmovn_s16(abcd_s16), vmovn_s16(efgh_s16));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet2l, Packet16uc>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
+ const Packet2l& d, const Packet2l& e, const Packet2l& f,
+ const Packet2l& g, const Packet2l& h) {
+ const uint16x8_t abcd_u16 = pcast<Packet2l, Packet8us>(a, b, c, d);
+ const uint16x8_t efgh_u16 = pcast<Packet2l, Packet8us>(e, f, g, h);
+ return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16));
+}
+
+//==============================================================================
+// pcast, SrcType = uint64_t
+//==============================================================================
+template <>
+struct type_casting_traits<numext::uint64_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet2ul, Packet4f>(const Packet2ul& a, const Packet2ul& b) {
+ return vcvtq_f32_u32(vcombine_u32(vmovn_u64(a), vmovn_u64(b)));
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet2ul, Packet2ul>(const Packet2ul& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet2ul, Packet2l>(const Packet2ul& a) {
+ return vreinterpretq_s64_u64(a);
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet2ul, Packet4ui>(const Packet2ul& a, const Packet2ul& b) {
+ return vcombine_u32(vmovn_u64(a), vmovn_u64(b));
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet2ul, Packet4i>(const Packet2ul& a, const Packet2ul& b) {
+ return vreinterpretq_s32_u32(pcast<Packet2ul, Packet4ui>(a, b));
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet2ul, Packet8us>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
+ const Packet2ul& d) {
+ const uint16x4_t ab_u16 = vmovn_u32(vcombine_u32(vmovn_u64(a), vmovn_u64(b)));
+ const uint16x4_t cd_u16 = vmovn_u32(vcombine_u32(vmovn_u64(c), vmovn_u64(d)));
+ return vcombine_u16(ab_u16, cd_u16);
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet2ul, Packet8s>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
+ const Packet2ul& d) {
+ return vreinterpretq_s16_u16(pcast<Packet2ul, Packet8us>(a, b, c, d));
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet2ul, Packet16uc>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
+ const Packet2ul& d, const Packet2ul& e, const Packet2ul& f,
+ const Packet2ul& g, const Packet2ul& h) {
+ const uint16x8_t abcd_u16 = pcast<Packet2ul, Packet8us>(a, b, c, d);
+ const uint16x8_t efgh_u16 = pcast<Packet2ul, Packet8us>(e, f, g, h);
+ return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16));
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet2ul, Packet16c>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
+ const Packet2ul& d, const Packet2ul& e, const Packet2ul& f,
+ const Packet2ul& g, const Packet2ul& h) {
+ return vreinterpretq_s8_u8(pcast<Packet2ul, Packet16uc>(a, b, c, d, e, f, g, h));
+}
+
+//==============================================================================
+// preinterpret
+//==============================================================================
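+// preinterpret reinterprets the bit pattern of a packet (the SIMD analogue of
+// reinterpret_cast), in contrast to pcast above, which converts values.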
+template <>
+EIGEN_STRONG_INLINE Packet2f preinterpret<Packet2f, Packet2i>(const Packet2i& a) {
+ return vreinterpret_f32_s32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2f preinterpret<Packet2f, Packet2ui>(const Packet2ui& a) {
+ return vreinterpret_f32_u32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f, Packet4i>(const Packet4i& a) {
+ return vreinterpretq_f32_s32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f, Packet4ui>(const Packet4ui& a) {
+ return vreinterpretq_f32_u32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4c preinterpret<Packet4c, Packet4uc>(const Packet4uc& a) {
+ return static_cast<Packet4c>(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8c preinterpret<Packet8c, Packet8uc>(const Packet8uc& a) {
+ return vreinterpret_s8_u8(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet16c preinterpret<Packet16c, Packet16uc>(const Packet16uc& a) {
+ return vreinterpretq_s8_u8(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4uc preinterpret<Packet4uc, Packet4c>(const Packet4c& a) {
+ return static_cast<Packet4uc>(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8uc preinterpret<Packet8uc, Packet8c>(const Packet8c& a) {
+ return vreinterpret_u8_s8(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet16uc preinterpret<Packet16uc, Packet16c>(const Packet16c& a) {
+ return vreinterpretq_u8_s8(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4s preinterpret<Packet4s, Packet4us>(const Packet4us& a) {
+ return vreinterpret_s16_u16(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8s preinterpret<Packet8s, Packet8us>(const Packet8us& a) {
+ return vreinterpretq_s16_u16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4us preinterpret<Packet4us, Packet4s>(const Packet4s& a) {
+ return vreinterpret_u16_s16(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet8us preinterpret<Packet8us, Packet8s>(const Packet8s& a) {
+ return vreinterpretq_u16_s16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2i preinterpret<Packet2i, Packet2f>(const Packet2f& a) {
+ return vreinterpret_s32_f32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2i preinterpret<Packet2i, Packet2ui>(const Packet2ui& a) {
+ return vreinterpret_s32_u32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i, Packet4f>(const Packet4f& a) {
+ return vreinterpretq_s32_f32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i, Packet4ui>(const Packet4ui& a) {
+ return vreinterpretq_s32_u32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2ui preinterpret<Packet2ui, Packet2f>(const Packet2f& a) {
+ return vreinterpret_u32_f32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ui preinterpret<Packet2ui, Packet2i>(const Packet2i& a) {
+ return vreinterpret_u32_s32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4ui preinterpret<Packet4ui, Packet4f>(const Packet4f& a) {
+ return vreinterpretq_u32_f32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4ui preinterpret<Packet4ui, Packet4i>(const Packet4i& a) {
+ return vreinterpretq_u32_s32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2l preinterpret<Packet2l, Packet2ul>(const Packet2ul& a) {
+ return vreinterpretq_s64_u64(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ul preinterpret<Packet2ul, Packet2l>(const Packet2l& a) {
+ return vreinterpretq_u64_s64(a);
+}
+
+#if EIGEN_ARCH_ARM64
+
+//==============================================================================
+// pcast/preinterpret, Double
+//==============================================================================
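+// This section is guarded by EIGEN_ARCH_ARM64 because float64x2_t and the f64
+// conversion/reinterpret intrinsics are only available on AArch64.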
+
+template <>
+struct type_casting_traits<double, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet2d, Packet2d>(const Packet2d& a) {
+ return a;
+}
+
+template <>
+struct type_casting_traits<double, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
+ return vcombine_f32(vcvt_f32_f64(a), vcvt_f32_f64(b));
+}
+
+template <>
+struct type_casting_traits<double, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2l pcast<Packet2d, Packet2l>(const Packet2d& a) {
+ return vcvtq_s64_f64(a);
+}
+
+template <>
+struct type_casting_traits<double, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2ul pcast<Packet2d, Packet2ul>(const Packet2d& a) {
+ return vcvtq_u64_f64(a);
+}
+
+template <>
+struct type_casting_traits<double, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4i pcast<Packet2d, Packet4i>(const Packet2d& a, const Packet2d& b) {
+ return vcombine_s32(vmovn_s64(vcvtq_s64_f64(a)), vmovn_s64(vcvtq_s64_f64(b)));
+}
+
+template <>
+struct type_casting_traits<double, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet4ui pcast<Packet2d, Packet4ui>(const Packet2d& a, const Packet2d& b) {
+ return vcombine_u32(vmovn_u64(vcvtq_u64_f64(a)), vmovn_u64(vcvtq_u64_f64(b)));
+}
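+// vcvtq_s64_f64/vcvtq_u64_f64 round toward zero (FCVTZS/FCVTZU), matching the
+// truncation a scalar static_cast from double performs.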
+
+template <>
+struct type_casting_traits<double, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8s pcast<Packet2d, Packet8s>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
+ const Packet2d& d) {
+ const int32x4_t ab_s32 = pcast<Packet2d, Packet4i>(a, b);
+ const int32x4_t cd_s32 = pcast<Packet2d, Packet4i>(c, d);
+ return vcombine_s16(vmovn_s32(ab_s32), vmovn_s32(cd_s32));
+}
+
+template <>
+struct type_casting_traits<double, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet8us pcast<Packet2d, Packet8us>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
+ const Packet2d& d) {
+ const uint32x4_t ab_u32 = pcast<Packet2d, Packet4ui>(a, b);
+ const uint32x4_t cd_u32 = pcast<Packet2d, Packet4ui>(c, d);
+ return vcombine_u16(vmovn_u32(ab_u32), vmovn_u32(cd_u32));
+}
+
+template <>
+struct type_casting_traits<double, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16c pcast<Packet2d, Packet16c>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
+ const Packet2d& d, const Packet2d& e, const Packet2d& f,
+ const Packet2d& g, const Packet2d& h) {
+ const int16x8_t abcd_s16 = pcast<Packet2d, Packet8s>(a, b, c, d);
+ const int16x8_t efgh_s16 = pcast<Packet2d, Packet8s>(e, f, g, h);
+ return vcombine_s8(vmovn_s16(abcd_s16), vmovn_s16(efgh_s16));
+}
+
+template <>
+struct type_casting_traits<double, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet16uc pcast<Packet2d, Packet16uc>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
+ const Packet2d& d, const Packet2d& e, const Packet2d& f,
+ const Packet2d& g, const Packet2d& h) {
+ const uint16x8_t abcd_u16 = pcast<Packet2d, Packet8us>(a, b, c, d);
+ const uint16x8_t efgh_u16 = pcast<Packet2d, Packet8us>(e, f, g, h);
+ return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16));
+}
+
+template <>
+struct type_casting_traits<float, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) {
+  // Discard second half of input.
+ return vcvt_f64_f32(vget_low_f32(a));
+}
+
+template <>
+struct type_casting_traits<numext::int8_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet16c, Packet2d>(const Packet16c& a) {
+ // Discard all but first two values.
+ return vcvt_f64_f32(pcast<Packet8c, Packet2f>(vget_low_s8(a)));
+}
+
+template <>
+struct type_casting_traits<numext::uint8_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet16uc, Packet2d>(const Packet16uc& a) {
+ // Discard all but first two values.
+ return vcvt_f64_f32(pcast<Packet8uc, Packet2f>(vget_low_u8(a)));
+}
+
+template <>
+struct type_casting_traits<numext::int16_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet8s, Packet2d>(const Packet8s& a) {
+ // Discard all but first two values.
+ return vcvt_f64_f32(pcast<Packet4s, Packet2f>(vget_low_s16(a)));
+}
+
+template <>
+struct type_casting_traits<numext::uint16_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet8us, Packet2d>(const Packet8us& a) {
+ // Discard all but first two values.
+ return vcvt_f64_f32(pcast<Packet4us, Packet2f>(vget_low_u16(a)));
+}
+
+template <>
+struct type_casting_traits<numext::int32_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet4i, Packet2d>(const Packet4i& a) {
+ // Discard second half of input.
+ return vcvtq_f64_s64(vmovl_s32(vget_low_s32(a)));
+}
+
+template <>
+struct type_casting_traits<numext::uint32_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet4ui, Packet2d>(const Packet4ui& a) {
+ // Discard second half of input.
+ return vcvtq_f64_u64(vmovl_u32(vget_low_u32(a)));
+}
+
+template <>
+struct type_casting_traits<numext::int64_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet2l, Packet2d>(const Packet2l& a) {
+ return vcvtq_f64_s64(a);
+}
+
+template <>
+struct type_casting_traits<numext::uint64_t, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+template <>
+EIGEN_STRONG_INLINE Packet2d pcast<Packet2ul, Packet2d>(const Packet2ul& a) {
+ return vcvtq_f64_u64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d, Packet2l>(const Packet2l& a) {
+ return vreinterpretq_f64_s64(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d, Packet2ul>(const Packet2ul& a) {
+ return vreinterpretq_f64_u64(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2l preinterpret<Packet2l, Packet2d>(const Packet2d& a) {
+ return vreinterpretq_s64_f64(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2ul preinterpret<Packet2ul, Packet2d>(const Packet2d& a) {
+ return vreinterpretq_u64_f64(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d, Packet4i>(const Packet4i& a) {
+ return vreinterpretq_f64_s32(a);
+}
+template <>
+EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i, Packet2d>(const Packet2d& a) {
+ return vreinterpretq_s32_f64(a);
+}
+
+#endif // EIGEN_ARCH_ARM64
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_NEON_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/Complex.h
index 23e717f28..215bfd7bb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/Complex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/Complex.h
@@ -19,7 +19,7 @@ struct Packet2cf
{
EIGEN_STRONG_INLINE Packet2cf() {}
EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {}
- __m128 v;
+ Packet4f v;
};
// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
@@ -40,20 +40,33 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
HasMul = 1,
HasDiv = 1,
HasNegate = 1,
+ HasSqrt = 1,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
HasMax = 0,
HasSetLinear = 0,
- HasBlend = 1
+ HasBlend = 1
};
};
#endif
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
+template<> struct unpacket_traits<Packet2cf> {
+ typedef std::complex<float> type;
+ typedef Packet2cf half;
+ typedef Packet4f as_real;
+ enum {
+ size=2,
+ alignment=Aligned16,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
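+// The new as_real typedef exposes the matching real packet type (Packet4f), so
+// generic complex helpers such as psqrt_complex below can work on raw lanes.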
template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }
+
template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a)
{
const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
@@ -82,30 +95,20 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
#endif
}
+template<> EIGEN_STRONG_INLINE Packet2cf ptrue <Packet2cf>(const Packet2cf& a) { return Packet2cf(ptrue(Packet4f(a.v))); }
template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(b.v,a.v)); }
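+// _mm_andnot_ps(x, y) computes (~x) & y, hence the swapped operands above to
+// implement Eigen's pandnot(a, b) == a & ~b.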
template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
- Packet2cf res;
-#if EIGEN_GNUC_AT_MOST(4,2)
- // Workaround annoying "may be used uninitialized in this function" warning with gcc 4.2
- res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), reinterpret_cast<const __m64*>(&from));
-#elif EIGEN_GNUC_AT_LEAST(4,6)
- // Suppress annoying "may be used uninitialized in this function" warning with gcc >= 4.6
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wuninitialized"
- res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
- #pragma GCC diagnostic pop
-#else
- res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
-#endif
- return Packet2cf(_mm_movelh_ps(res.v,res.v));
+ const float re = std::real(from);
+ const float im = std::imag(from);
+ return Packet2cf(_mm_set_ps(im, re, im, re));
}
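+// _mm_set_ps lists elements from the highest lane down, so (im, re, im, re)
+// stores [re, im, re, im] in memory, i.e. the complex value broadcast twice.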
template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
@@ -128,7 +131,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf
_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3)));
}
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
@@ -152,97 +155,26 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packe
return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v))));
}
-template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
-{
- return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v)));
-}
-
template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
{
return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v))));
}
-template<int Offset>
-struct palign_impl<Offset,Packet2cf>
-{
- static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
- {
- if (Offset==1)
- {
- first.v = _mm_movehl_ps(first.v, first.v);
- first.v = _mm_movelh_ps(first.v, second.v);
- }
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- #ifdef EIGEN_VECTORIZE_SSE3
- return internal::pmul(a, pconj(b));
- #else
- const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
- return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),
- _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
- vec4f_swizzle1(b.v, 1, 0, 3, 2))));
- #endif
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- #ifdef EIGEN_VECTORIZE_SSE3
- return internal::pmul(pconj(a), b);
- #else
- const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
- return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
- _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
- vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
- #endif
- }
-};
-
-template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+EIGEN_STRONG_INLINE Packet2cf pcplxflip/* <Packet2cf> */(const Packet2cf& x)
{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- #ifdef EIGEN_VECTORIZE_SSE3
- return pconj(internal::pmul(a, b));
- #else
- const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
- return Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),
- _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
- vec4f_swizzle1(b.v, 1, 0, 3, 2))));
- #endif
- }
-};
+ return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
+}
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
// TODO optimize it for SSE3 and 4
- Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+ Packet2cf res = pmul(a, pconj(b));
__m128 s = _mm_mul_ps(b.v,b.v);
- return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1)))));
+ return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,vec4f_swizzle1(s, 1, 0, 3, 2))));
}
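+// Standard complex division: a/b = a * conj(b) / |b|^2. The swizzled add puts
+// re^2 + im^2 of each complex number into both of its float slots.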
-EIGEN_STRONG_INLINE Packet2cf pcplxflip/* <Packet2cf> */(const Packet2cf& x)
-{
- return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
-}
//---------- double ----------
@@ -250,7 +182,7 @@ struct Packet1cd
{
EIGEN_STRONG_INLINE Packet1cd() {}
EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {}
- __m128d v;
+ Packet2d v;
};
// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
@@ -271,6 +203,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
HasMul = 1,
HasDiv = 1,
HasNegate = 1,
+ HasSqrt = 1,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
@@ -280,7 +213,18 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
#endif
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd> {
+ typedef std::complex<double> type;
+ typedef Packet1cd half;
+ typedef Packet2d as_real;
+ enum {
+ size=1,
+ alignment=Aligned16,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+};
template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }
@@ -305,10 +249,11 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
#endif
}
+template<> EIGEN_STRONG_INLINE Packet1cd ptrue <Packet1cd>(const Packet1cd& a) { return Packet1cd(ptrue(Packet2d(a.v))); }
template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(b.v,a.v)); }
// FIXME force unaligned load, this is a temporary fix
template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from)
@@ -324,7 +269,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
@@ -340,86 +285,17 @@ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Pack
return pfirst(a);
}
-template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)
-{
- return vecs[0];
-}
-
template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
{
return pfirst(a);
}
-template<int Offset>
-struct palign_impl<Offset,Packet1cd>
-{
- static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
- {
- // FIXME is it sure we never have to align a Packet1cd?
- // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- #ifdef EIGEN_VECTORIZE_SSE3
- return internal::pmul(a, pconj(b));
- #else
- const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
- return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),
- _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
- vec2d_swizzle1(b.v, 1, 0))));
- #endif
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- #ifdef EIGEN_VECTORIZE_SSE3
- return internal::pmul(pconj(a), b);
- #else
- const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
- return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
- _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
- vec2d_swizzle1(b.v, 1, 0)), mask)));
- #endif
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- #ifdef EIGEN_VECTORIZE_SSE3
- return pconj(internal::pmul(a, b));
- #else
- const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
- return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),
- _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
- vec2d_swizzle1(b.v, 1, 0))));
- #endif
- }
-};
-
EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
// TODO optimize it for SSE3 and 4
- Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+ Packet1cd res = pmul(a,pconj(b));
__m128d s = _mm_mul_pd(b.v,b.v);
return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1))));
}
@@ -439,33 +315,32 @@ ptranspose(PacketBlock<Packet2cf,2>& kernel) {
kernel.packet[1].v = tmp;
}
-template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
- __m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));
- return Packet2cf(_mm_castpd_ps(result));
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b)
+{
+ __m128 eq = _mm_cmpeq_ps(a.v, b.v);
+ return Packet2cf(pand<Packet4f>(eq, vec4f_swizzle1(eq, 1, 0, 3, 2)));
}
-template<> EIGEN_STRONG_INLINE Packet2cf pinsertfirst(const Packet2cf& a, std::complex<float> b)
+template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b)
{
- return Packet2cf(_mm_loadl_pi(a.v, reinterpret_cast<const __m64*>(&b)));
+ __m128d eq = _mm_cmpeq_pd(a.v, b.v);
+ return Packet1cd(pand<Packet2d>(eq, vec2d_swizzle1(eq, 1, 0)));
}
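// Illustrative scalar sketch (editorial, not part of the patch) of the complex
// pcmp_eq semantics above: the element-wise compare yields one all-ones or
// all-zeros mask per scalar, and AND-ing it with its real/imag-swapped copy
// sets a complex lane only when *both* components compare equal.
#include <cstdint>
static void cmp_eq_complex_lane(uint32_t re_mask, uint32_t im_mask,
                                uint32_t& re_out, uint32_t& im_out) {
  re_out = re_mask & im_mask;  // AND with the swizzled (swapped) mask
  im_out = im_mask & re_mask;  // both words end up all-ones or all-zeros together
}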
-template<> EIGEN_STRONG_INLINE Packet1cd pinsertfirst(const Packet1cd&, std::complex<double> b)
-{
- return pset1<Packet1cd>(b);
+template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
+ __m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));
+ return Packet2cf(_mm_castpd_ps(result));
}
-template<> EIGEN_STRONG_INLINE Packet2cf pinsertlast(const Packet2cf& a, std::complex<float> b)
-{
- return Packet2cf(_mm_loadh_pi(a.v, reinterpret_cast<const __m64*>(&b)));
+template<> EIGEN_STRONG_INLINE Packet1cd psqrt<Packet1cd>(const Packet1cd& a) {
+ return psqrt_complex<Packet1cd>(a);
}
-template<> EIGEN_STRONG_INLINE Packet1cd pinsertlast(const Packet1cd&, std::complex<double> b)
-{
- return pset1<Packet1cd>(b);
+template<> EIGEN_STRONG_INLINE Packet2cf psqrt<Packet2cf>(const Packet2cf& a) {
+ return psqrt_complex<Packet2cf>(a);
}
} // end namespace internal
-
} // end namespace Eigen
#endif // EIGEN_COMPLEX_SSE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/MathFunctions.h
index 7b5f948e1..8736d0d6b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -8,7 +8,7 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/* The sin, cos, exp, and log functions of this file come from
+/* The sin and cos functions of this file come from
* Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
*/
@@ -20,426 +20,57 @@ namespace Eigen {
namespace internal {
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f plog<Packet4f>(const Packet4f& _x)
-{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
-
- /* the smallest non denormalized float number */
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f);
-
- /* natural logarithm computed for 4 simultaneous float
- return NaN for x <= 0
- */
- _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
-
-
- Packet4i emm0;
-
- Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN
- Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
-
- x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
- emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
-
- /* keep only the fractional part */
- x = _mm_and_ps(x, p4f_inv_mant_mask);
- x = _mm_or_ps(x, p4f_half);
-
- emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
- Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1);
-
- /* part2:
- if( x < SQRTHF ) {
- e -= 1;
- x = x + x - 1.0;
- } else { x = x - 1.0; }
- */
- Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
- Packet4f tmp = pand(x, mask);
- x = psub(x, p4f_1);
- e = psub(e, pand(p4f_1, mask));
- x = padd(x, tmp);
-
- Packet4f x2 = pmul(x,x);
- Packet4f x3 = pmul(x2,x);
-
- Packet4f y, y1, y2;
- y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
- y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
- y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
- y = pmadd(y , x, p4f_cephes_log_p2);
- y1 = pmadd(y1, x, p4f_cephes_log_p5);
- y2 = pmadd(y2, x, p4f_cephes_log_p8);
- y = pmadd(y, x3, y1);
- y = pmadd(y, x3, y2);
- y = pmul(y, x3);
-
- y1 = pmul(e, p4f_cephes_log_q1);
- tmp = pmul(x2, p4f_half);
- y = padd(y, y1);
- x = psub(x, tmp);
- y2 = pmul(e, p4f_cephes_log_q2);
- x = padd(x, y);
- x = padd(x, y2);
- // negative arg will be NAN, 0 will be -INF
- return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),
- _mm_and_ps(iszero_mask, p4f_minus_inf));
+Packet4f plog<Packet4f>(const Packet4f& _x) {
+ return plog_float(_x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& _x)
-{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
- _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
-
-
- _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
- _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
-
- _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
-
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
-
- Packet4f tmp, fx;
- Packet4i emm0;
+Packet2d plog<Packet2d>(const Packet2d& _x) {
+ return plog_double(_x);
+}
- // clamp x
- x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f plog2<Packet4f>(const Packet4f& _x) {
+ return plog2_float(_x);
+}
- /* express exp(x) as exp(g + n*log(2)) */
- fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d plog2<Packet2d>(const Packet2d& _x) {
+ return plog2_double(_x);
+}
-#ifdef EIGEN_VECTORIZE_SSE4_1
- fx = _mm_floor_ps(fx);
-#else
- emm0 = _mm_cvttps_epi32(fx);
- tmp = _mm_cvtepi32_ps(emm0);
- /* if greater, substract 1 */
- Packet4f mask = _mm_cmpgt_ps(tmp, fx);
- mask = _mm_and_ps(mask, p4f_1);
- fx = psub(tmp, mask);
-#endif
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f plog1p<Packet4f>(const Packet4f& _x) {
+ return generic_plog1p(_x);
+}
- tmp = pmul(fx, p4f_cephes_exp_C1);
- Packet4f z = pmul(fx, p4f_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- z = pmul(x,x);
-
- Packet4f y = p4f_cephes_exp_p0;
- y = pmadd(y, x, p4f_cephes_exp_p1);
- y = pmadd(y, x, p4f_cephes_exp_p2);
- y = pmadd(y, x, p4f_cephes_exp_p3);
- y = pmadd(y, x, p4f_cephes_exp_p4);
- y = pmadd(y, x, p4f_cephes_exp_p5);
- y = pmadd(y, z, x);
- y = padd(y, p4f_1);
-
- // build 2^n
- emm0 = _mm_cvttps_epi32(fx);
- emm0 = _mm_add_epi32(emm0, p4i_0x7f);
- emm0 = _mm_slli_epi32(emm0, 23);
- return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexpm1<Packet4f>(const Packet4f& _x) {
+ return generic_expm1(_x);
}
+
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet2d pexp<Packet2d>(const Packet2d& _x)
+Packet4f pexp<Packet4f>(const Packet4f& _x)
{
- Packet2d x = _x;
-
- _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
- _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
- _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
-
- _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437);
- _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
-
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
- _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
- static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
-
- Packet2d tmp, fx;
- Packet4i emm0;
-
- // clamp x
- x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
- /* express exp(x) as exp(g + n*log(2)) */
- fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
-
-#ifdef EIGEN_VECTORIZE_SSE4_1
- fx = _mm_floor_pd(fx);
-#else
- emm0 = _mm_cvttpd_epi32(fx);
- tmp = _mm_cvtepi32_pd(emm0);
- /* if greater, substract 1 */
- Packet2d mask = _mm_cmpgt_pd(tmp, fx);
- mask = _mm_and_pd(mask, p2d_1);
- fx = psub(tmp, mask);
-#endif
-
- tmp = pmul(fx, p2d_cephes_exp_C1);
- Packet2d z = pmul(fx, p2d_cephes_exp_C2);
- x = psub(x, tmp);
- x = psub(x, z);
-
- Packet2d x2 = pmul(x,x);
-
- Packet2d px = p2d_cephes_exp_p0;
- px = pmadd(px, x2, p2d_cephes_exp_p1);
- px = pmadd(px, x2, p2d_cephes_exp_p2);
- px = pmul (px, x);
-
- Packet2d qx = p2d_cephes_exp_q0;
- qx = pmadd(qx, x2, p2d_cephes_exp_q1);
- qx = pmadd(qx, x2, p2d_cephes_exp_q2);
- qx = pmadd(qx, x2, p2d_cephes_exp_q3);
-
- x = pdiv(px,psub(qx,px));
- x = pmadd(p2d_2,x,p2d_1);
-
- // build 2^n
- emm0 = _mm_cvttpd_epi32(fx);
- emm0 = _mm_add_epi32(emm0, p4i_1023_0);
- emm0 = _mm_slli_epi32(emm0, 20);
- emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
- return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
+ return pexp_float(_x);
}
-/* evaluation of 4 sines at onces, using SSE2 intrinsics.
-
- The code is the exact rewriting of the cephes sinf function.
- Precision is excellent as long as x < 8192 (I did not bother to
- take into account the special handling they have for greater values
- -- it does not return garbage for arguments over 8192, though, but
- the extra precision is missing).
-
- Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
- surprising but correct result.
-*/
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d pexp<Packet2d>(const Packet2d& x)
+{
+ return pexp_double(x);
+}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psin<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-
- _EIGEN_DECLARE_CONST_Packet4i(1, 1);
- _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
- _EIGEN_DECLARE_CONST_Packet4i(2, 2);
- _EIGEN_DECLARE_CONST_Packet4i(4, 4);
-
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
-
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
-
- Packet4f xmm1, xmm2, xmm3, sign_bit, y;
-
- Packet4i emm0, emm2;
- sign_bit = x;
- /* take the absolute value */
- x = pabs(x);
-
- /* take the modulo */
-
- /* extract the sign bit (upper one) */
- sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
-
- /* scale by 4/Pi */
- y = pmul(x, p4f_cephes_FOPI);
-
- /* store the integer part of y in mm0 */
- emm2 = _mm_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- emm2 = _mm_add_epi32(emm2, p4i_1);
- emm2 = _mm_and_si128(emm2, p4i_not1);
- y = _mm_cvtepi32_ps(emm2);
- /* get the swap sign flag */
- emm0 = _mm_and_si128(emm2, p4i_4);
- emm0 = _mm_slli_epi32(emm0, 29);
- /* get the polynom selection mask
- there is one polynom for 0 <= x <= Pi/4
- and another one for Pi/4<x<=Pi/2
-
- Both branches will be computed.
- */
- emm2 = _mm_and_si128(emm2, p4i_2);
- emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
-
- Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
- Packet4f poly_mask = _mm_castsi128_ps(emm2);
- sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
-
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
- xmm1 = pmul(y, p4f_minus_cephes_DP1);
- xmm2 = pmul(y, p4f_minus_cephes_DP2);
- xmm3 = pmul(y, p4f_minus_cephes_DP3);
- x = padd(x, xmm1);
- x = padd(x, xmm2);
- x = padd(x, xmm3);
-
- /* Evaluate the first polynom (0 <= x <= Pi/4) */
- y = p4f_coscof_p0;
- Packet4f z = _mm_mul_ps(x,x);
-
- y = pmadd(y, z, p4f_coscof_p1);
- y = pmadd(y, z, p4f_coscof_p2);
- y = pmul(y, z);
- y = pmul(y, z);
- Packet4f tmp = pmul(z, p4f_half);
- y = psub(y, tmp);
- y = padd(y, p4f_1);
-
- /* Evaluate the second polynom (Pi/4 <= x <= 0) */
-
- Packet4f y2 = p4f_sincof_p0;
- y2 = pmadd(y2, z, p4f_sincof_p1);
- y2 = pmadd(y2, z, p4f_sincof_p2);
- y2 = pmul(y2, z);
- y2 = pmul(y2, x);
- y2 = padd(y2, x);
-
- /* select the correct result from the two polynoms */
- y2 = _mm_and_ps(poly_mask, y2);
- y = _mm_andnot_ps(poly_mask, y);
- y = _mm_or_ps(y,y2);
- /* update the sign */
- return _mm_xor_ps(y, sign_bit);
+ return psin_float(_x);
}
-/* almost the same as psin */
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pcos<Packet4f>(const Packet4f& _x)
{
- Packet4f x = _x;
- _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
- _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
-
- _EIGEN_DECLARE_CONST_Packet4i(1, 1);
- _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
- _EIGEN_DECLARE_CONST_Packet4i(2, 2);
- _EIGEN_DECLARE_CONST_Packet4i(4, 4);
-
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
- _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
- _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
- _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
- _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
-
- Packet4f xmm1, xmm2, xmm3, y;
- Packet4i emm0, emm2;
-
- x = pabs(x);
-
- /* scale by 4/Pi */
- y = pmul(x, p4f_cephes_FOPI);
-
- /* get the integer part of y */
- emm2 = _mm_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- emm2 = _mm_add_epi32(emm2, p4i_1);
- emm2 = _mm_and_si128(emm2, p4i_not1);
- y = _mm_cvtepi32_ps(emm2);
-
- emm2 = _mm_sub_epi32(emm2, p4i_2);
-
- /* get the swap sign flag */
- emm0 = _mm_andnot_si128(emm2, p4i_4);
- emm0 = _mm_slli_epi32(emm0, 29);
- /* get the polynom selection mask */
- emm2 = _mm_and_si128(emm2, p4i_2);
- emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
-
- Packet4f sign_bit = _mm_castsi128_ps(emm0);
- Packet4f poly_mask = _mm_castsi128_ps(emm2);
-
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
- xmm1 = pmul(y, p4f_minus_cephes_DP1);
- xmm2 = pmul(y, p4f_minus_cephes_DP2);
- xmm3 = pmul(y, p4f_minus_cephes_DP3);
- x = padd(x, xmm1);
- x = padd(x, xmm2);
- x = padd(x, xmm3);
-
- /* Evaluate the first polynom (0 <= x <= Pi/4) */
- y = p4f_coscof_p0;
- Packet4f z = pmul(x,x);
-
- y = pmadd(y,z,p4f_coscof_p1);
- y = pmadd(y,z,p4f_coscof_p2);
- y = pmul(y, z);
- y = pmul(y, z);
- Packet4f tmp = _mm_mul_ps(z, p4f_half);
- y = psub(y, tmp);
- y = padd(y, p4f_1);
-
- /* Evaluate the second polynom (Pi/4 <= x <= 0) */
- Packet4f y2 = p4f_sincof_p0;
- y2 = pmadd(y2, z, p4f_sincof_p1);
- y2 = pmadd(y2, z, p4f_sincof_p2);
- y2 = pmul(y2, z);
- y2 = pmadd(y2, x, x);
-
- /* select the correct result from the two polynoms */
- y2 = _mm_and_ps(poly_mask, y2);
- y = _mm_andnot_ps(poly_mask, y);
- y = _mm_or_ps(y,y2);
-
- /* update the sign */
- return _mm_xor_ps(y, sign_bit);
+ return pcos_float(_x);
}
#if EIGEN_FAST_MATH
@@ -455,17 +86,17 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& _x)
{
- Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
- Packet4f denormal_mask = _mm_and_ps(
- _mm_cmpge_ps(_x, _mm_setzero_ps()),
- _mm_cmplt_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)())));
+ Packet4f minus_half_x = pmul(_x, pset1<Packet4f>(-0.5f));
+ Packet4f denormal_mask = pandnot(
+ pcmp_lt(_x, pset1<Packet4f>((std::numeric_limits<float>::min)())),
+ pcmp_lt(_x, pzero(_x)));
// Compute approximate reciprocal sqrt.
Packet4f x = _mm_rsqrt_ps(_x);
// Do a single step of Newton's iteration.
- x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
+ x = pmul(x, pmadd(minus_half_x, pmul(x,x), pset1<Packet4f>(1.5f)));
// Flush results for denormals to zero.
- return _mm_andnot_ps(denormal_mask, pmul(_x,x));
+ return pandnot(pmul(_x,x), denormal_mask);
}
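// Illustrative scalar sketch (editorial, not part of the patch) of the
// fast-math psqrt above: refine a crude 1/sqrt estimate with one Newton step
// y' = y*(1.5 + (-0.5*x)*y*y), then recover sqrt(x) as x*y'. Inputs in the
// denormal range are flushed to zero, matching the pandnot(denormal_mask).
#include <cmath>
#include <limits>
static float fast_sqrt_sketch(float x) {
  if (x >= 0.f && x < std::numeric_limits<float>::min()) return 0.f; // flush denormals (and 0)
  float y = 1.0f / std::sqrt(x);          // stands in for the ~12-bit _mm_rsqrt_ps estimate
  y = y * (1.5f + (-0.5f * x) * y * y);   // one Newton-Raphson refinement
  return x * y;                           // sqrt(x) = x * (1/sqrt(x))
}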
#else
@@ -478,41 +109,48 @@ Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet16b psqrt<Packet16b>(const Packet16b& x) { return x; }
+
#if EIGEN_FAST_MATH
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f prsqrt<Packet4f>(const Packet4f& _x) {
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000);
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(nan, 0x7fc00000);
_EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f);
_EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f);
- _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000);
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000u);
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000u);
Packet4f neg_half = pmul(_x, p4f_minus_half);
- // select only the inverse sqrt of positive normal inputs (denormals are
- // flushed to zero and cause infs as well).
- Packet4f le_zero_mask = _mm_cmple_ps(_x, p4f_flt_min);
- Packet4f x = _mm_andnot_ps(le_zero_mask, _mm_rsqrt_ps(_x));
-
- // Fill in NaNs and Infs for the negative/zero entries.
- Packet4f neg_mask = _mm_cmplt_ps(_x, _mm_setzero_ps());
- Packet4f zero_mask = _mm_andnot_ps(neg_mask, le_zero_mask);
- Packet4f infs_and_nans = _mm_or_ps(_mm_and_ps(neg_mask, p4f_nan),
- _mm_and_ps(zero_mask, p4f_inf));
-
- // Do a single step of Newton's iteration.
- x = pmul(x, pmadd(neg_half, pmul(x, x), p4f_one_point_five));
-
- // Insert NaNs and Infs in all the right places.
- return _mm_or_ps(x, infs_and_nans);
+  // Identify infinite, zero, negative and denormal arguments.
+ Packet4f lt_min_mask = _mm_cmplt_ps(_x, p4f_flt_min);
+ Packet4f inf_mask = _mm_cmpeq_ps(_x, p4f_inf);
+ Packet4f not_normal_finite_mask = _mm_or_ps(lt_min_mask, inf_mask);
+
+ // Compute an approximate result using the rsqrt intrinsic.
+ Packet4f y_approx = _mm_rsqrt_ps(_x);
+
+ // Do a single step of Newton-Raphson iteration to improve the approximation.
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
+ // It is essential to evaluate the inner term like this because forming
+ // y_n^2 may over- or underflow.
+ Packet4f y_newton = pmul(
+ y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p4f_one_point_five));
+
+ // Select the result of the Newton-Raphson step for positive normal arguments.
+ // For other arguments, choose the output of the intrinsic. This will
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
+ // x is zero or a positive denormalized float (equivalent to flushing positive
+ // denormalized inputs to zero).
+ return pselect<Packet4f>(not_normal_finite_mask, y_approx, y_newton);
}
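// Illustrative scalar sketch (editorial, not part of the patch) of the
// selection above: keep the Newton-refined value only for positive normal
// finite inputs; for x < FLT_MIN (zero, negatives, denormals) or x == +inf,
// keep the raw approximation, whose special values are already the ones
// wanted: rsqrt(+inf) = 0, rsqrt(x<0) = NaN, rsqrt(0 or denormal) = +inf.
#include <cmath>
#include <limits>
static float prsqrt_sketch(float x) {
  float y_approx = 1.0f / std::sqrt(x);   // stands in for _mm_rsqrt_ps
  float y_newton = y_approx * ((-0.5f * x) * y_approx * y_approx + 1.5f);
  bool not_normal_finite = (x < std::numeric_limits<float>::min()) || std::isinf(x);
  return not_normal_finite ? y_approx : y_newton;
}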
#else
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f prsqrt<Packet4f>(const Packet4f& x) {
- // Unfortunately we can't use the much faster mm_rqsrt_ps since it only provides an approximation.
+ // Unfortunately we can't use the much faster mm_rsqrt_ps since it only provides an approximation.
return _mm_div_ps(pset1<Packet4f>(1.0f), _mm_sqrt_ps(x));
}
@@ -520,7 +158,6 @@ Packet4f prsqrt<Packet4f>(const Packet4f& x) {
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d prsqrt<Packet2d>(const Packet2d& x) {
- // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation.
return _mm_div_pd(pset1<Packet2d>(1.0), _mm_sqrt_pd(x));
}
@@ -548,7 +185,7 @@ double sqrt(const double &x)
{
#if EIGEN_COMP_GNUC_STRICT
// This works around a GCC bug generating poor code for _mm_sqrt_pd
- // See https://bitbucket.org/eigen/eigen/commits/14f468dba4d350d7c19c9b93072e19f7b3df563b
+ // See https://gitlab.com/libeigen/eigen/commit/8dca9f97e38970
return internal::pfirst(internal::Packet2d(__builtin_ia32_sqrtsd(_mm_set_sd(x))));
#else
return internal::pfirst(internal::Packet2d(_mm_sqrt_pd(_mm_set_sd(x))));
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/PacketMath.h
index d3b74aca4..db102c73a 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -18,63 +18,93 @@ namespace internal {
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
-#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
+// 32 bits => 8 registers
+// 64 bits => 16 registers
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
-#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
+#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif
-#if (defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW || EIGEN_COMP_LCC_E2K) && (__GXX_ABI_VERSION < 1004)
+#if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX
// With GCC's default ABI version, a __m128 or __m256 are the same types and therefore we cannot
// have overloads for both types without linking error.
// One solution is to increase ABI version using -fabi-version=4 (or greater).
// Otherwise, we workaround this inconvenience by wrapping 128bit types into the following helper
// structure:
-template<typename T>
-struct eigen_packet_wrapper
-{
- EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
- EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
- EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
- EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
- EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
- m_val = v;
- return *this;
- }
-
- T m_val;
-};
typedef eigen_packet_wrapper<__m128> Packet4f;
-typedef eigen_packet_wrapper<__m128i> Packet4i;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128 Packet4f;
-typedef __m128i Packet4i;
typedef __m128d Packet2d;
#endif
+typedef eigen_packet_wrapper<__m128i, 0> Packet4i;
+typedef eigen_packet_wrapper<__m128i, 1> Packet16b;
+
template<> struct is_arithmetic<__m128> { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };
+template<> struct is_arithmetic<Packet4i> { enum { value = true }; };
+template<> struct is_arithmetic<Packet16b> { enum { value = true }; };
+
+template<int p, int q, int r, int s>
+struct shuffle_mask{
+ enum { mask = (s)<<6|(r)<<4|(q)<<2|(p) };
+};
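// Editorial example (not part of the patch), assuming the shuffle_mask
// template defined just above: the four 2-bit lane indices are packed
// little-end first, so the identity shuffle is 0xE4 and the full reversal
// used by preverse below is 0x1B.
static_assert(shuffle_mask<0,1,2,3>::mask == 0xE4, "identity shuffle");
static_assert(shuffle_mask<3,2,1,0>::mask == 0x1B, "lane reversal");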
+// TODO: change the implementation of all swizzle* ops from macros to templates.
#define vec4f_swizzle1(v,p,q,r,s) \
- (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
+ Packet4f(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), (shuffle_mask<p,q,r,s>::mask))))
#define vec4i_swizzle1(v,p,q,r,s) \
- (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
+ Packet4i(_mm_shuffle_epi32( v, (shuffle_mask<p,q,r,s>::mask)))
#define vec2d_swizzle1(v,p,q) \
- (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
+ Packet2d(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (shuffle_mask<2*p,2*p+1,2*q,2*q+1>::mask))))
#define vec4f_swizzle2(a,b,p,q,r,s) \
- (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
+ Packet4f(_mm_shuffle_ps( (a), (b), (shuffle_mask<p,q,r,s>::mask)))
#define vec4i_swizzle2(a,b,p,q,r,s) \
- (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
+ Packet4i(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), (shuffle_mask<p,q,r,s>::mask)))))
+
+EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b)
+{
+ return Packet4f(_mm_movelh_ps(a,b));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_movehl(const Packet4f& a, const Packet4f& b)
+{
+ return Packet4f(_mm_movehl_ps(a,b));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_unpacklo(const Packet4f& a, const Packet4f& b)
+{
+ return Packet4f(_mm_unpacklo_ps(a,b));
+}
+EIGEN_STRONG_INLINE Packet4f vec4f_unpackhi(const Packet4f& a, const Packet4f& b)
+{
+ return Packet4f(_mm_unpackhi_ps(a,b));
+}
+#define vec4f_duplane(a,p) \
+ vec4f_swizzle2(a,a,p,p,p,p)
+
+#define vec2d_swizzle2(a,b,mask) \
+ Packet2d(_mm_shuffle_pd(a,b,mask))
+
+EIGEN_STRONG_INLINE Packet2d vec2d_unpacklo(const Packet2d& a, const Packet2d& b)
+{
+ return Packet2d(_mm_unpacklo_pd(a,b));
+}
+EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a, const Packet2d& b)
+{
+ return Packet2d(_mm_unpackhi_pd(a,b));
+}
+#define vec2d_duplane(a,p) \
+ vec2d_swizzle2(a,a,(p<<1)|p)
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
@@ -83,7 +113,7 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
const Packet2d p2d_##NAME = pset1<Packet2d>(X)
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
- const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
+ const Packet4f p4f_##NAME = pset1frombits<Packet4f>(X)
#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
const Packet4i p4i_##NAME = pset1<Packet4i>(X)
@@ -92,36 +122,41 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
// to leverage AVX instructions.
#ifndef EIGEN_VECTORIZE_AVX
-template<> struct packet_traits<float> : default_packet_traits
-{
+template <>
+struct packet_traits<float> : default_packet_traits {
typedef Packet4f type;
typedef Packet4f half;
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
- size=4,
+ size = 4,
HasHalfPacket = 0,
- HasDiv = 1,
- HasSin = EIGEN_FAST_MATH,
- HasCos = EIGEN_FAST_MATH,
- HasLog = 1,
- HasExp = 1,
+ HasCmp = 1,
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasNdtri = 1,
+ HasExp = 1,
+ HasBessel = 1,
HasSqrt = 1,
HasRsqrt = 1,
- HasTanh = EIGEN_FAST_MATH,
- HasBlend = 1
-
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH,
+ HasBlend = 1,
+ HasCeil = 1,
+ HasFloor = 1,
#ifdef EIGEN_VECTORIZE_SSE4_1
- ,
HasRound = 1,
- HasFloor = 1,
- HasCeil = 1
#endif
+ HasRint = 1
};
};
-template<> struct packet_traits<double> : default_packet_traits
-{
+template <>
+struct packet_traits<double> : default_packet_traits {
typedef Packet2d type;
typedef Packet2d half;
enum {
@@ -130,18 +165,19 @@ template<> struct packet_traits<double> : default_packet_traits
size=2,
HasHalfPacket = 0,
+ HasCmp = 1,
HasDiv = 1,
+ HasLog = 1,
HasExp = 1,
HasSqrt = 1,
HasRsqrt = 1,
- HasBlend = 1
-
+ HasBlend = 1,
+ HasFloor = 1,
+ HasCeil = 1,
#ifdef EIGEN_VECTORIZE_SSE4_1
- ,
HasRound = 1,
- HasFloor = 1,
- HasCeil = 1
#endif
+ HasRint = 1
};
};
#endif
@@ -154,13 +190,56 @@ template<> struct packet_traits<int> : default_packet_traits
AlignedOnScalar = 1,
size=4,
+ HasShift = 1,
HasBlend = 1
};
};
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct packet_traits<bool> : default_packet_traits
+{
+ typedef Packet16b type;
+ typedef Packet16b half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ HasHalfPacket = 0,
+ size=16,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 0,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasConj = 0,
+ HasSqrt = 1
+ };
+};
+
+template<> struct unpacket_traits<Packet4f> {
+ typedef float type;
+ typedef Packet4f half;
+ typedef Packet4i integer_packet;
+ enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet2d> {
+ typedef double type;
+ typedef Packet2d half;
+ enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet4i> {
+ typedef int type;
+ typedef Packet4i half;
+ enum {size=4, alignment=Aligned16, vectorizable=false, masked_load_available=false, masked_store_available=false};
+};
+template<> struct unpacket_traits<Packet16b> {
+ typedef bool type;
+ typedef Packet16b half;
+ enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+};
#ifndef EIGEN_VECTORIZE_AVX
template<> struct scalar_div_cost<float,true> { enum { value = 7 }; };
@@ -179,6 +258,18 @@ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { re
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
#endif
+template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(uint64_t from) { return _mm_castsi128_pd(_mm_set1_epi64x(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f peven_mask(const Packet4f& /*a*/) { return _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1)); }
+template<> EIGEN_STRONG_INLINE Packet4i peven_mask(const Packet4i& /*a*/) { return _mm_set_epi32(0, -1, 0, -1); }
+template<> EIGEN_STRONG_INLINE Packet2d peven_mask(const Packet2d& /*a*/) { return _mm_castsi128_pd(_mm_set_epi32(0, 0, -1, -1)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
+template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
+template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }
// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203)
@@ -199,9 +290,34 @@ template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16b padd<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
+
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16b psub<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b);
+template<> EIGEN_STRONG_INLINE Packet4f paddsub<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE3
+ return _mm_addsub_ps(a,b);
+#else
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x0,0x80000000,0x0));
+ return padd(a, pxor(mask, b));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& , const Packet2d& );
+template<> EIGEN_STRONG_INLINE Packet2d paddsub<Packet2d>(const Packet2d& a, const Packet2d& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE3
+ return _mm_addsub_pd(a,b);
+#else
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x0));
+ return padd(a, pxor(mask, b));
+#endif
+}
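// Illustrative scalar sketch (editorial, not part of the patch) of paddsub's
// lane semantics: even lanes compute a - b, odd lanes a + b. The non-SSE3
// fallback above gets the same effect by XOR-flipping the sign bit of b in
// the even lanes before a plain add.
static void paddsub_sketch(const float a[4], const float b[4], float out[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = (i % 2 == 0) ? a[i] - b[i]   // even lane: sign of b flipped by the mask
                          : a[i] + b[i];  // odd lane: ordinary add
}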
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
@@ -218,6 +334,11 @@ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
}
+template<> EIGEN_STRONG_INLINE Packet16b pnegate(const Packet16b& a)
+{
+ return psub(pset1<Packet16b>(false), a);
+}
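// Editorial note with a scalar sketch (not part of the patch): for the bool
// packet, the arithmetic ops above degenerate to bitwise logic. padd is a
// saturating OR (true + true == true), psub matches bool(a - b) and is XOR,
// pmul is AND, and pnegate is the identity since bool(-x) == x for x in {0,1}.
static bool badd(bool a, bool b) { return a | b; }          // padd<Packet16b>
static bool bsub(bool a, bool b) { return a ^ b; }          // psub<Packet16b>
static bool bmul(bool a, bool b) { return a & b; }          // pmul<Packet16b>
static bool bneg(bool a)         { return bsub(false, a); } // pnegate == identity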
+
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
@@ -240,24 +361,101 @@ template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const
#endif
}
+template<> EIGEN_STRONG_INLINE Packet16b pmul<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
+
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
-#ifdef __FMA__
+#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif
+#ifdef EIGEN_VECTORIZE_SSE4_1
+template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
+ return _mm_blendv_ps(b,a,mask);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b) {
+ return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask)));
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
+ return _mm_blendv_epi8(b,a,mask);
+}
+#else
+template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
+ Packet16b a_part = _mm_and_si128(mask, a);
+ Packet16b b_part = _mm_andnot_si128(mask, b);
+ return _mm_or_si128(a_part, b_part);
+}
+#endif
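// Illustrative scalar sketch (editorial, not part of the patch) of the
// pre-SSE4.1 fallback above, the classic branch-free bitwise select: each
// mask bit picks the corresponding bit of a (bit set) or b (bit clear).
#include <cstdint>
static uint32_t bitwise_select(uint32_t mask, uint32_t a, uint32_t b) {
  return (mask & a) | (~mask & b);  // and / andnot / or, as in the intrinsics
}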
+
+template<> EIGEN_STRONG_INLINE Packet4i ptrue<Packet4i>(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); }
+template<> EIGEN_STRONG_INLINE Packet16b ptrue<Packet16b>(const Packet16b& a) { return _mm_cmpeq_epi8(a, a); }
+template<> EIGEN_STRONG_INLINE Packet4f
+ptrue<Packet4f>(const Packet4f& a) {
+ Packet4i b = _mm_castps_si128(a);
+ return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b));
+}
+template<> EIGEN_STRONG_INLINE Packet2d
+ptrue<Packet2d>(const Packet2d& a) {
+ Packet4i b = _mm_castpd_si128(a);
+ return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b));
+}
+
+
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16b pand<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16b por<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16b pxor<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return _mm_cmple_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return _mm_cmplt_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) { return _mm_cmpnge_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return _mm_cmplt_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16b pcmp_eq(const Packet16b& a, const Packet16b& b) { return _mm_cmpeq_epi8(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return por(pcmp_lt(a,b), pcmp_eq(a,b)); }
+
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
-#if EIGEN_COMP_GNUC && !(EIGEN_COMP_LCC_E2K)
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_min_ps, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet4f res;
+ asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet4f res = b;
asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::min.
@@ -265,13 +463,18 @@ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
-#if EIGEN_COMP_GNUC && !(EIGEN_COMP_LCC_E2K)
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_min_pd, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet2d res;
+ asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet2d res = b;
asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::min.
@@ -289,14 +492,20 @@ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const
#endif
}
+
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
-#if EIGEN_COMP_GNUC && !(EIGEN_COMP_LCC_E2K)
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_max_ps, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet4f res;
+ asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet4f res = b;
asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::max.
@@ -304,13 +513,18 @@ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
-#if EIGEN_COMP_GNUC && !(EIGEN_COMP_LCC_E2K)
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
// There appears to be a bug in GCC, by which the optimizer may
// flip the argument order in calls to _mm_max_pd, so we have to
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+ #ifdef EIGEN_VECTORIZE_AVX
+ Packet2d res;
+ asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+ #else
Packet2d res = b;
asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
+ #endif
return res;
#else
// Arguments are reversed to match NaN propagation behavior of std::max.
@@ -328,36 +542,180 @@ template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const
#endif
}
+template <typename Packet, typename Op>
+EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet& a, const Packet& b, Op op) {
+ // In this implementation, we take advantage of the fact that pmin/pmax for SSE
+ // always return a if either a or b is NaN.
+ Packet not_nan_mask_a = pcmp_eq(a, a);
+ Packet m = op(a, b);
+ return pselect<Packet>(not_nan_mask_a, m, b);
+}
+
+template <typename Packet, typename Op>
+EIGEN_STRONG_INLINE Packet pminmax_propagate_nan(const Packet& a, const Packet& b, Op op) {
+ // In this implementation, we take advantage of the fact that pmin/pmax for SSE
+ // always return a if either a or b is NaN.
+ Packet not_nan_mask_a = pcmp_eq(a, a);
+ Packet m = op(b, a);
+ return pselect<Packet>(not_nan_mask_a, m, a);
+}
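// Illustrative scalar sketch (editorial, not part of the patch), assuming the
// convention stated in the comments above: op(x, y) returns its first
// argument x whenever either input is NaN.
#include <cmath>
static float propagate_numbers_sketch(float a, float b, float (*op)(float, float)) {
  float m = op(a, b);            // equals a if either input is NaN
  return std::isnan(a) ? b : m;  // pselect(not_nan_mask_a, m, b)
}
static float propagate_nan_sketch(float a, float b, float (*op)(float, float)) {
  float m = op(b, a);            // equals b if either input is NaN
  return std::isnan(a) ? a : m;  // pselect(not_nan_mask_a, m, a)
}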
+
+// Add specializations for min/max with prescribed NaN propagation.
+template<>
+EIGEN_STRONG_INLINE Packet4f pmin<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
+ return pminmax_propagate_numbers(a, b, pmin<Packet4f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet2d pmin<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
+ return pminmax_propagate_numbers(a, b, pmin<Packet2d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4f pmax<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
+ return pminmax_propagate_numbers(a, b, pmax<Packet4f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet2d pmax<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
+ return pminmax_propagate_numbers(a, b, pmax<Packet2d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4f pmin<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
+ return pminmax_propagate_nan(a, b, pmin<Packet4f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet2d pmin<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
+ return pminmax_propagate_nan(a, b, pmin<Packet2d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4f pmax<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
+ return pminmax_propagate_nan(a, b, pmax<Packet4f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet2d pmax<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
+ return pminmax_propagate_nan(a, b, pmax<Packet2d>);
+}
+
+template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a) { return _mm_srai_epi32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right (const Packet4i& a) { return _mm_srli_epi32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left (const Packet4i& a) { return _mm_slli_epi32(a,N); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
+{
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
+ return _mm_and_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
+{
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
+ return _mm_and_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
+{
+ #ifdef EIGEN_VECTORIZE_SSSE3
+ return _mm_abs_epi32(a);
+ #else
+ Packet4i aux = _mm_srai_epi32(a,31);
+ return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
+ #endif
+}
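// Illustrative scalar sketch (editorial, not part of the patch) of the SSE2
// fallback above: aux = a >> 31 (arithmetic shift) is 0 for a >= 0 and -1
// (all ones) for a < 0, so (a ^ aux) - aux is a no-op for non-negative a and
// computes the two's-complement negation ~a + 1 for negative a.
#include <cstdint>
static int32_t abs_sketch(int32_t a) {
  int32_t aux = a >> 31;   // sign bit replicated into every bit
  return (a ^ aux) - aux;  // branch-free |a| (undefined for INT32_MIN, which has no positive counterpart)
}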
+
#ifdef EIGEN_VECTORIZE_SSE4_1
-template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
-template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
+{
+  // Unfortunately _mm_round_ps doesn't have a rounding mode to implement numext::round.
+ const Packet4f mask = pset1frombits<Packet4f>(0x80000000u);
+ const Packet4f prev0dot5 = pset1frombits<Packet4f>(0x3EFFFFFFu);
+ return _mm_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a)
+{
+ const Packet2d mask = _mm_castsi128_pd(_mm_set_epi64x(0x8000000000000000ull, 0x8000000000000000ull));
+ const Packet2d prev0dot5 = _mm_castsi128_pd(_mm_set_epi64x(0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull));
+ return _mm_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+}
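// Editorial sketch (not part of the patch) of why the magic constant is the
// largest float *below* 0.5 (0x3EFFFFFF) rather than 0.5 itself: pround adds
// copysign(prev0.5, a) and truncates toward zero. Adding a full 0.5 would
// misround inputs just under a half, e.g. a = 0.5f - 2^-25: a + 0.5f rounds
// up to 1.0f and truncates to 1, while a + prev0.5 = 1 - 2^-24 truncates to 0.
// The exact half a = 0.5f still works: 0.5f + prev0.5 = 1 - 2^-25 rounds (to
// nearest even) to 1.0f, giving round-half-away-from-zero.
#include <cmath>
static float round_half_away_sketch(float a) {
  const float prev0dot5 = 0.49999997f;                 // bit pattern 0x3EFFFFFF
  return std::trunc(a + std::copysign(prev0dot5, a));  // _MM_FROUND_TO_ZERO
}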
+
+template<> EIGEN_STRONG_INLINE Packet4f print<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
+template<> EIGEN_STRONG_INLINE Packet2d print<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
-#endif
+#else
+template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) {
+ // Adds and subtracts signum(a) * 2^23 to force rounding.
+ const Packet4f limit = pset1<Packet4f>(static_cast<float>(1<<23));
+ const Packet4f abs_a = pabs(a);
+ Packet4f r = padd(abs_a, limit);
+  // Prevent the compiler from optimizing the addition and subtraction away.
+ EIGEN_OPTIMIZATION_BARRIER(r);
+ r = psub(r, limit);
+ // If greater than limit, simply return a. Otherwise, account for sign.
+ r = pselect(pcmp_lt(abs_a, limit),
+ pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
+ return r;
+}
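// Illustrative scalar sketch (editorial, not part of the patch) of the
// round-to-integer trick above: for |a| < 2^23 every float at or above 2^23
// is an integer, so (|a| + 2^23) - 2^23 rounds |a| to an integer in the
// current rounding mode. 'volatile' stands in for EIGEN_OPTIMIZATION_BARRIER
// so the compiler cannot fold the add/sub pair away.
static float rint_sketch(float a) {
  const float limit = 8388608.0f;      // 2^23
  float abs_a = a < 0.0f ? -a : a;
  if (!(abs_a < limit)) return a;      // already integral, or NaN/inf
  volatile float r = abs_a + limit;    // rounding happens in this addition
  float rounded = r - limit;
  return a < 0.0f ? -rounded : rounded;
}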
-template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& a) {
+ // Adds and subtracts signum(a) * 2^52 to force rounding.
+ const Packet2d limit = pset1<Packet2d>(static_cast<double>(1ull<<52));
+ const Packet2d abs_a = pabs(a);
+ Packet2d r = padd(abs_a, limit);
+  // Prevent the compiler from optimizing the addition and subtraction away.
+ EIGEN_OPTIMIZATION_BARRIER(r);
+ r = psub(r, limit);
+ // If greater than limit, simply return a. Otherwise, account for sign.
+ r = pselect(pcmp_lt(abs_a, limit),
+ pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
+ return r;
+}
-template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+{
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
+ Packet4f tmp = print<Packet4f>(a);
+ // If greater, subtract one.
+ Packet4f mask = _mm_cmpgt_ps(tmp, a);
+ mask = pand(mask, cst_1);
+ return psub(tmp, mask);
+}
-template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
+{
+ const Packet2d cst_1 = pset1<Packet2d>(1.0);
+ Packet2d tmp = print<Packet2d>(a);
+ // If greater, subtract one.
+ Packet2d mask = _mm_cmpgt_pd(tmp, a);
+ mask = pand(mask, cst_1);
+ return psub(tmp, mask);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
+{
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
+ Packet4f tmp = print<Packet4f>(a);
+ // If smaller, add one.
+ Packet4f mask = _mm_cmplt_ps(tmp, a);
+ mask = pand(mask, cst_1);
+ return padd(tmp, mask);
+}
-template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
-template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a)
+{
+ const Packet2d cst_1 = pset1<Packet2d>(1.0);
+ Packet2d tmp = print<Packet2d>(a);
+ // If smaller, add one.
+ Packet2d mask = _mm_cmplt_pd(tmp, a);
+ mask = pand(mask, cst_1);
+ return padd(tmp, mask);
+}
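// Editorial scalar sketch (not part of the patch) of the floor/ceil
// corrections above: start from the round-to-nearest result and nudge it by
// one where it landed on the wrong side of a.
#include <cmath>
static float floor_sketch(float a) {
  float tmp = std::nearbyint(a);        // plays the role of print<Packet4f>
  return tmp > a ? tmp - 1.0f : tmp;    // if rounded up, subtract one
}
static float ceil_sketch(float a) {
  float tmp = std::nearbyint(a);
  return tmp < a ? tmp + 1.0f : tmp;    // if rounded down, add one
}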
+#endif
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
+template<> EIGEN_STRONG_INLINE Packet16b pload<Packet16b>(const bool* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
#if EIGEN_COMP_MSVC
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
@@ -392,6 +750,10 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
EIGEN_DEBUG_UNALIGNED_LOAD
return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}
+template<> EIGEN_STRONG_INLINE Packet16b ploadu<Packet16b>(const bool* from) {
+ EIGEN_DEBUG_UNALIGNED_LOAD
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
+}
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
@@ -407,13 +769,32 @@ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
+// Loads 8 bools from memory and returns the packet
+// {b0, b0, b1, b1, b2, b2, b3, b3, b4, b4, b5, b5, b6, b6, b7, b7}
+template<> EIGEN_STRONG_INLINE Packet16b ploaddup<Packet16b>(const bool* from)
+{
+ __m128i tmp = _mm_castpd_si128(pload1<Packet2d>(reinterpret_cast<const double*>(from)));
+ return _mm_unpacklo_epi8(tmp, tmp);
+}
+
+// Loads 4 bools from memory and returns the packet
+// {b0, b0, b0, b0, b1, b1, b1, b1, b2, b2, b2, b2, b3, b3, b3, b3}
+template<> EIGEN_STRONG_INLINE Packet16b
+ploadquad<Packet16b>(const bool* from) {
+ __m128i tmp = _mm_castps_si128(pload1<Packet4f>(reinterpret_cast<const float*>(from)));
+ tmp = _mm_unpacklo_epi8(tmp, tmp);
+ return _mm_unpacklo_epi16(tmp, tmp);
+}
+
template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstore<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
@@ -426,7 +807,15 @@ template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const dou
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
- }
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet16b pgather<bool, Packet16b>(const bool* from, Index stride)
+{
+ return _mm_set_epi8(from[15*stride], from[14*stride], from[13*stride], from[12*stride],
+ from[11*stride], from[10*stride], from[9*stride], from[8*stride],
+ from[7*stride], from[6*stride], from[5*stride], from[4*stride],
+ from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
+}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
@@ -447,6 +836,14 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const
to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<bool, Packet16b>(bool* to, const Packet16b& from, Index stride)
+{
+  // Scatter the 16 one-byte bools individually; a pass through an aligned
+  // buffer avoids per-lane byte-extraction intrinsics.
+  EIGEN_ALIGN16 bool tmp[16];
+  pstore(tmp, from);
+  for (int i = 0; i < 16; ++i) to[i*stride] = tmp[i];
+}
+
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
@@ -461,10 +858,16 @@ template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double&
pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}
+#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
+typedef const void * SsePrefetchPtrType;
+#else
+typedef const char * SsePrefetchPtrType;
+#endif
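+// Older PGI releases declare _mm_prefetch with a const void* parameter,
+// hence the pointer-type switch above.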
+
#ifndef EIGEN_VECTORIZE_AVX
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif
#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
@@ -483,32 +886,62 @@ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { retu
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif
+template<> EIGEN_STRONG_INLINE bool pfirst<Packet16b>(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast<bool>(x & 1); }
-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
-{ return _mm_shuffle_ps(a,a,0x1B); }
-template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
-{ return _mm_shuffle_pd(a,a,0x1); }
-template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
-{ return _mm_shuffle_epi32(a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet16b preverse(const Packet16b& a) {
+#ifdef EIGEN_VECTORIZE_SSSE3
+ __m128i mask = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return _mm_shuffle_epi8(a, mask);
+#else
+ Packet16b tmp = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3));
+ tmp = _mm_shufflehi_epi16(_mm_shufflelo_epi16(tmp, _MM_SHUFFLE(2, 3, 0, 1)), _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_or_si128(_mm_slli_epi16(tmp, 8), _mm_srli_epi16(tmp, 8));
+#endif
+}
-template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
-{
- const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
- return _mm_and_ps(a,mask);
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
+ return pfrexp_generic(a,exponent);
}
-template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
-{
- const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
- return _mm_and_pd(a,mask);
+
+// Extract the biased exponent as a Packet2d; SSE has no Packet2l type in which to hold it.
+template<>
+EIGEN_STRONG_INLINE
+Packet2d pfrexp_generic_get_biased_exponent(const Packet2d& a) {
+ const Packet2d cst_exp_mask = pset1frombits<Packet2d>(static_cast<uint64_t>(0x7ff0000000000000ull));
+ __m128i a_expo = _mm_srli_epi64(_mm_castpd_si128(pand(a, cst_exp_mask)), 52);
+ return _mm_cvtepi32_pd(vec4i_swizzle1(a_expo, 0, 2, 1, 3));
}
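+// Worked example: for a = {8.0, 8.0} the exponent field of each lane is
+// 0x402 = 1026 (bias 1023 + exponent 3), so this returns {1026.0, 1026.0}.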
-template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
-{
- #ifdef EIGEN_VECTORIZE_SSSE3
- return _mm_abs_epi32(a);
- #else
- Packet4i aux = _mm_srai_epi32(a,31);
- return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
- #endif
+
+template<> EIGEN_STRONG_INLINE Packet2d pfrexp<Packet2d>(const Packet2d& a, Packet2d& exponent) {
+ return pfrexp_generic(a, exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
+ return pldexp_generic(a,exponent);
+}
+
+// We specialize pldexp here, since the generic implementation uses Packet2l, which is not well
+// supported by SSE, and has more range than is needed for exponents.
+template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
+ // Clamp exponent to [-2099, 2099]
+ const Packet2d max_exponent = pset1<Packet2d>(2099.0);
+ const Packet2d e = pmin(pmax(exponent, pnegate(max_exponent)), max_exponent);
+
+ // Convert e to integer and swizzle to low-order bits.
+ const Packet4i ei = vec4i_swizzle1(_mm_cvtpd_epi32(e), 0, 3, 1, 3);
+
+ // Split 2^e into four factors and multiply:
+ const Packet4i bias = _mm_set_epi32(0, 1023, 0, 1023);
+ Packet4i b = parithmetic_shift_right<2>(ei); // floor(e/4)
+ Packet2d c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52)); // 2^b
+ Packet2d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
+ b = psub(psub(psub(ei, b), b), b); // e - 3b
+ c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52)); // 2^(e - 3b)
+ out = pmul(out, c); // a * 2^e
+ return out;
}
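+// Worked example: for e = 10, b = floor(10/4) = 2, so out = a * (2^2)^3 * 2^(10-6)
+// = a * 2^10; splitting into four factors keeps every biased exponent (b + 1023)
+// representable even at the clamp bound e = 2099.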
// with AVX, the default implementations based on pload1 are faster
@@ -551,38 +984,6 @@ EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}
-#ifdef EIGEN_VECTORIZE_SSE3
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
- return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
- return _mm_hadd_pd(vecs[0], vecs[1]);
-}
-
-#else
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
- Packet4f tmp0, tmp1, tmp2;
- tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
- tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
- tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
- tmp0 = _mm_add_ps(tmp0, tmp1);
- tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
- tmp1 = _mm_add_ps(tmp1, tmp2);
- tmp2 = _mm_movehl_ps(tmp1, tmp0);
- tmp0 = _mm_movelh_ps(tmp0, tmp1);
- return _mm_add_ps(tmp0, tmp2);
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
- return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
-}
-#endif // SSE3
-
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
// Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
@@ -608,38 +1009,28 @@ template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
}
#ifdef EIGEN_VECTORIZE_SSSE3
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
-{
- return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
-}
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
Packet4i tmp0 = _mm_hadd_epi32(a,a);
return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
}
+
#else
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
}
+#endif
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
-{
- Packet4i tmp0, tmp1, tmp2;
- tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
- tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
- tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
- tmp0 = _mm_add_epi32(tmp0, tmp1);
- tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
- tmp1 = _mm_add_epi32(tmp1, tmp2);
- tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
- tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
- return _mm_add_epi32(tmp0, tmp2);
+template<> EIGEN_STRONG_INLINE bool predux<Packet16b>(const Packet16b& a) {
+ Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a,a));
+ return (pfirst(tmp) != 0) || (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) != 0);
}
-#endif
+
// Other reduction functions:
+
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
@@ -657,7 +1048,13 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
// TODO try to call _mm_mul_epu32 directly
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
- return (aux[0] * aux[1]) * (aux[2] * aux[3]);;
+ return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+template<> EIGEN_STRONG_INLINE bool predux_mul<Packet16b>(const Packet16b& a) {
+ Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a,a));
+ return ((pfirst<Packet4i>(tmp) == 0x01010101) &&
+ (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) == 0x01010101));
}
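+// For bools, predux is a logical OR (any lane set) while predux_mul is a
+// logical AND: true is stored as byte 0x01, so all sixteen lanes are set
+// exactly when both 32-bit halves of the AND-reduced value equal 0x01010101.
+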
// min
@@ -712,113 +1109,16 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
#endif // EIGEN_VECTORIZE_SSE4_1
}
-#if EIGEN_COMP_GNUC && !(EIGEN_COMP_LCC_E2K)
-// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
-// {
-// Packet4f res = b;
-// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
-// return res;
-// }
-// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
+// not needed yet
+// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
// {
-// Packet4i res = a;
-// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
-// return res;
+// return _mm_movemask_ps(x) == 0xF;
// }
-#endif
-
-#ifdef EIGEN_VECTORIZE_SSSE3
-// SSSE3 versions
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
- static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
- {
- if (Offset!=0)
- first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
- }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
- static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
- {
- if (Offset!=0)
- first = _mm_alignr_epi8(second,first, Offset*4);
- }
-};
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
{
- static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
- {
- if (Offset==1)
- first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
- }
-};
-#else
-// SSE2 versions
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
- static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
- {
- if (Offset==1)
- {
- first = _mm_move_ss(first,second);
- first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
- }
- else if (Offset==2)
- {
- first = _mm_movehl_ps(first,first);
- first = _mm_movelh_ps(first,second);
- }
- else if (Offset==3)
- {
- first = _mm_move_ss(first,second);
- first = _mm_shuffle_ps(first,second,0x93);
- }
- }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
- static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
- {
- if (Offset==1)
- {
- first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- first = _mm_shuffle_epi32(first,0x39);
- }
- else if (Offset==2)
- {
- first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
- first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- }
- else if (Offset==3)
- {
- first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
- }
- }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
- static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
- {
- if (Offset==1)
- {
- first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
- first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
- }
- }
-};
-#endif
+ return _mm_movemask_ps(x) != 0x0;
+}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
@@ -845,6 +1145,100 @@ ptranspose(PacketBlock<Packet4i,4>& kernel) {
kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
}
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16b,4>& kernel) {
+ __m128i T0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]);
+ __m128i T1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]);
+ __m128i T2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]);
+ __m128i T3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]);
+ kernel.packet[0] = _mm_unpacklo_epi16(T0, T2);
+ kernel.packet[1] = _mm_unpackhi_epi16(T0, T2);
+ kernel.packet[2] = _mm_unpacklo_epi16(T1, T3);
+ kernel.packet[3] = _mm_unpackhi_epi16(T1, T3);
+}
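+
+// The 4x16 case interleaves byte-wise: output packet k holds
+// {a[4k], b[4k], c[4k], d[4k], a[4k+1], ..., d[4k+3]}, i.e. groups of four
+// transposed columns of the four input packets a, b, c, d.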
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16b,16>& kernel) {
+ // If we number the elements in the input thus:
+ // kernel.packet[ 0] = {00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 0a, 0b, 0c, 0d, 0e, 0f}
+ // kernel.packet[ 1] = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1a, 1b, 1c, 1d, 1e, 1f}
+ // ...
+ // kernel.packet[15] = {f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, fa, fb, fc, fd, fe, ff},
+ //
+ // the desired output is:
+ // kernel.packet[ 0] = {00, 10, 20, 30, 40, 50, 60, 70, 80, 90, a0, b0, c0, d0, e0, f0}
+ // kernel.packet[ 1] = {01, 11, 21, 31, 41, 51, 61, 71, 81, 91, a1, b1, c1, d1, e1, f1}
+ // ...
+ // kernel.packet[15] = {0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, af, bf, cf, df, ef, ff},
+ __m128i t0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+ __m128i t1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]); // 08 18 09 19 0a 1a 0b 1b 0c 1c 0d 1d 0e 1e 0f 1f
+ __m128i t2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]); // 20 30 21 31 22 32 ... 27 37
+ __m128i t3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]); // 28 38 29 39 2a 3a ... 2f 3f
+ __m128i t4 = _mm_unpacklo_epi8(kernel.packet[4], kernel.packet[5]); // 40 50 41 51 42 52 47 57
+ __m128i t5 = _mm_unpackhi_epi8(kernel.packet[4], kernel.packet[5]); // 48 58 49 59 4a 5a
+ __m128i t6 = _mm_unpacklo_epi8(kernel.packet[6], kernel.packet[7]);
+ __m128i t7 = _mm_unpackhi_epi8(kernel.packet[6], kernel.packet[7]);
+ __m128i t8 = _mm_unpacklo_epi8(kernel.packet[8], kernel.packet[9]);
+ __m128i t9 = _mm_unpackhi_epi8(kernel.packet[8], kernel.packet[9]);
+ __m128i ta = _mm_unpacklo_epi8(kernel.packet[10], kernel.packet[11]);
+ __m128i tb = _mm_unpackhi_epi8(kernel.packet[10], kernel.packet[11]);
+ __m128i tc = _mm_unpacklo_epi8(kernel.packet[12], kernel.packet[13]);
+ __m128i td = _mm_unpackhi_epi8(kernel.packet[12], kernel.packet[13]);
+ __m128i te = _mm_unpacklo_epi8(kernel.packet[14], kernel.packet[15]);
+ __m128i tf = _mm_unpackhi_epi8(kernel.packet[14], kernel.packet[15]);
+
+ __m128i s0 = _mm_unpacklo_epi16(t0, t2); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+ __m128i s1 = _mm_unpackhi_epi16(t0, t2); // 04 14 24 34
+ __m128i s2 = _mm_unpacklo_epi16(t1, t3); // 08 18 28 38 ...
+ __m128i s3 = _mm_unpackhi_epi16(t1, t3); // 0c 1c 2c 3c ...
+ __m128i s4 = _mm_unpacklo_epi16(t4, t6); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+ __m128i s5 = _mm_unpackhi_epi16(t4, t6); // 44 54 64 74 ...
+ __m128i s6 = _mm_unpacklo_epi16(t5, t7);
+ __m128i s7 = _mm_unpackhi_epi16(t5, t7);
+ __m128i s8 = _mm_unpacklo_epi16(t8, ta);
+ __m128i s9 = _mm_unpackhi_epi16(t8, ta);
+ __m128i sa = _mm_unpacklo_epi16(t9, tb);
+ __m128i sb = _mm_unpackhi_epi16(t9, tb);
+ __m128i sc = _mm_unpacklo_epi16(tc, te);
+ __m128i sd = _mm_unpackhi_epi16(tc, te);
+ __m128i se = _mm_unpacklo_epi16(td, tf);
+ __m128i sf = _mm_unpackhi_epi16(td, tf);
+
+ __m128i u0 = _mm_unpacklo_epi32(s0, s4); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+ __m128i u1 = _mm_unpackhi_epi32(s0, s4); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+ __m128i u2 = _mm_unpacklo_epi32(s1, s5);
+ __m128i u3 = _mm_unpackhi_epi32(s1, s5);
+ __m128i u4 = _mm_unpacklo_epi32(s2, s6);
+ __m128i u5 = _mm_unpackhi_epi32(s2, s6);
+ __m128i u6 = _mm_unpacklo_epi32(s3, s7);
+ __m128i u7 = _mm_unpackhi_epi32(s3, s7);
+ __m128i u8 = _mm_unpacklo_epi32(s8, sc);
+ __m128i u9 = _mm_unpackhi_epi32(s8, sc);
+ __m128i ua = _mm_unpacklo_epi32(s9, sd);
+ __m128i ub = _mm_unpackhi_epi32(s9, sd);
+ __m128i uc = _mm_unpacklo_epi32(sa, se);
+ __m128i ud = _mm_unpackhi_epi32(sa, se);
+ __m128i ue = _mm_unpacklo_epi32(sb, sf);
+ __m128i uf = _mm_unpackhi_epi32(sb, sf);
+
+ kernel.packet[0] = _mm_unpacklo_epi64(u0, u8);
+ kernel.packet[1] = _mm_unpackhi_epi64(u0, u8);
+ kernel.packet[2] = _mm_unpacklo_epi64(u1, u9);
+ kernel.packet[3] = _mm_unpackhi_epi64(u1, u9);
+ kernel.packet[4] = _mm_unpacklo_epi64(u2, ua);
+ kernel.packet[5] = _mm_unpackhi_epi64(u2, ua);
+ kernel.packet[6] = _mm_unpacklo_epi64(u3, ub);
+ kernel.packet[7] = _mm_unpackhi_epi64(u3, ub);
+ kernel.packet[8] = _mm_unpacklo_epi64(u4, uc);
+ kernel.packet[9] = _mm_unpackhi_epi64(u4, uc);
+ kernel.packet[10] = _mm_unpacklo_epi64(u5, ud);
+ kernel.packet[11] = _mm_unpackhi_epi64(u5, ud);
+ kernel.packet[12] = _mm_unpacklo_epi64(u6, ue);
+ kernel.packet[13] = _mm_unpackhi_epi64(u6, ue);
+ kernel.packet[14] = _mm_unpacklo_epi64(u7, uf);
+ kernel.packet[15] = _mm_unpackhi_epi64(u7, uf);
+}
+
template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
const __m128i zero = _mm_setzero_si128();
const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
@@ -876,56 +1270,236 @@ template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, cons
#endif
}
-template<> EIGEN_STRONG_INLINE Packet4f pinsertfirst(const Packet4f& a, float b)
-{
-#ifdef EIGEN_VECTORIZE_SSE4_1
- return _mm_blend_ps(a,pset1<Packet4f>(b),1);
-#else
- return _mm_move_ss(a, _mm_load_ss(&b));
+// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
+#ifdef EIGEN_VECTORIZE_FMA
+template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
+ return ::fmaf(a,b,c);
+}
+template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
+ return ::fma(a,b,c);
+}
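+// ::fmaf and ::fma evaluate a*b+c with a single rounding, so scalar tails of
+// a computation round the same way as the vectorized FMA lanes.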
#endif
+
+
+// Packet math for Eigen::half
+// Disable the following code since it's broken on too many platforms / compilers.
+//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
+#if 0
+
+typedef struct {
+ __m64 x;
+} Packet4h;
+
+
+template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+ typedef Packet4h type;
+ // There is no half-size packet for Packet4h.
+ typedef Packet4h half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+ HasHalfPacket = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 0,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasConj = 0,
+ HasSetLinear = 0,
+ HasSqrt = 0,
+ HasRsqrt = 0,
+ HasExp = 0,
+ HasLog = 0,
+ HasBlend = 0
+ };
+};
+
+
+template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h half; };
+
+template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
+ Packet4h result;
+ result.x = _mm_set1_pi16(from.x);
+ return result;
}
-template<> EIGEN_STRONG_INLINE Packet2d pinsertfirst(const Packet2d& a, double b)
-{
-#ifdef EIGEN_VECTORIZE_SSE4_1
- return _mm_blend_pd(a,pset1<Packet2d>(b),1);
-#else
- return _mm_move_sd(a, _mm_load_sd(&b));
-#endif
+template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
+ return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
}
-template<> EIGEN_STRONG_INLINE Packet4f pinsertlast(const Packet4f& a, float b)
-{
-#ifdef EIGEN_VECTORIZE_SSE4_1
- return _mm_blend_ps(a,pset1<Packet4f>(b),(1<<3));
-#else
- const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF));
- return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1<Packet4f>(b)));
-#endif
+template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+ Eigen::half h[4];
+
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+ h[0] = ha + hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+ h[1] = ha + hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+ h[2] = ha + hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+ h[3] = ha + hb;
+ Packet4h result;
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+ Eigen::half h[4];
+
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+ h[0] = ha - hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+ h[1] = ha - hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+ h[2] = ha - hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+ h[3] = ha - hb;
+ Packet4h result;
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+ Eigen::half h[4];
+
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+ h[0] = ha * hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+ h[1] = ha * hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+ h[2] = ha * hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+ h[3] = ha * hb;
+ Packet4h result;
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+ Eigen::half h[4];
+
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+ h[0] = ha / hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+ h[1] = ha / hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+ h[2] = ha / hb;
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+ h[3] = ha / hb;
+ Packet4h result;
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
+ Packet4h result;
+ result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
+ return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
+ Packet4h result;
+ result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
+ return result;
}
-template<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b)
+template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
+ __int64_t r = _mm_cvtm64_si64(from.x);
+ *(reinterpret_cast<__int64_t*>(to)) = r;
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
+ __int64_t r = _mm_cvtm64_si64(from.x);
+ *(reinterpret_cast<__int64_t*>(to)) = r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h
+ploadquad<Packet4h>(const Eigen::half* from) {
+ return pset1<Packet4h>(*from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
{
-#ifdef EIGEN_VECTORIZE_SSE4_1
- return _mm_blend_pd(a,pset1<Packet2d>(b),(1<<1));
-#else
- const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));
- return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));
-#endif
+ Packet4h result;
+ result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
+ return result;
}
-// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
-#ifdef __FMA__
-template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
- return ::fmaf(a,b,c);
+template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
+{
+ __int64_t a = _mm_cvtm64_si64(from.x);
+ to[stride*0].x = static_cast<unsigned short>(a);
+ to[stride*1].x = static_cast<unsigned short>(a >> 16);
+ to[stride*2].x = static_cast<unsigned short>(a >> 32);
+ to[stride*3].x = static_cast<unsigned short>(a >> 48);
}
-template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
- return ::fma(a,b,c);
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet4h,4>& kernel) {
+ __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
+ __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
+ __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
+ __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
+
+ kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
+ kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
+ kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
+ kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
}
+
#endif
+
} // end namespace internal
} // end namespace Eigen
+#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
+// PGI++ does not define the following intrinsics in C++ mode.
+static inline __m128 _mm_castpd_ps (__m128d x) { return reinterpret_cast<__m128&>(x); }
+static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); }
+static inline __m128d _mm_castps_pd (__m128 x) { return reinterpret_cast<__m128d&>(x); }
+static inline __m128i _mm_castps_si128(__m128 x) { return reinterpret_cast<__m128i&>(x); }
+static inline __m128 _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x); }
+static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); }
+#endif
+
#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/TypeCasting.h
index c84893230..d2a0037e0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/TypeCasting.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SSE/TypeCasting.h
@@ -14,6 +14,7 @@ namespace Eigen {
namespace internal {
+#ifndef EIGEN_VECTORIZE_AVX
template <>
struct type_casting_traits<float, int> {
enum {
@@ -23,11 +24,6 @@ struct type_casting_traits<float, int> {
};
};
-template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
- return _mm_cvttps_epi32(a);
-}
-
-
template <>
struct type_casting_traits<int, float> {
enum {
@@ -37,11 +33,6 @@ struct type_casting_traits<int, float> {
};
};
-template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
- return _mm_cvtepi32_ps(a);
-}
-
-
template <>
struct type_casting_traits<double, float> {
enum {
@@ -51,10 +42,6 @@ struct type_casting_traits<double, float> {
};
};
-template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
- return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
-}
-
template <>
struct type_casting_traits<float, double> {
enum {
@@ -63,12 +50,90 @@ struct type_casting_traits<float, double> {
TgtCoeffRatio = 2
};
};
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return _mm_cvttps_epi32(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
+ return _mm_cvtepi32_ps(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
+ return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
+}
template<> EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) {
// Simply discard the second half of the input
return _mm_cvtps_pd(a);
}
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
+ return _mm_castps_si128(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
+ return _mm_castsi128_ps(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d,Packet4i>(const Packet4i& a) {
+ return _mm_castsi128_pd(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet2d>(const Packet2d& a) {
+ return _mm_castpd_si128(a);
+}
+
+// Disable the following code since it's broken on too many platforms / compilers.
+//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
+#if 0
+
+template <>
+struct type_casting_traits<Eigen::half, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
+ Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));
+ float f1 = static_cast<float>(h);
+ h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+ float f2 = static_cast<float>(h);
+ h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+ float f3 = static_cast<float>(h);
+ h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+ float f4 = static_cast<float>(h);
+ return _mm_set_ps(f4, f3, f2, f1);
+}
+
+template <>
+struct type_casting_traits<float, Eigen::half> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+};
+
+template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {
+ EIGEN_ALIGN16 float aux[4];
+ pstore(aux, a);
+ Eigen::half h0(aux[0]);
+ Eigen::half h1(aux[1]);
+ Eigen::half h2(aux[2]);
+ Eigen::half h3(aux[3]);
+
+ Packet4h result;
+ result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);
+ return result;
+}
+
+#endif
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/MathFunctions.h
new file mode 100644
index 000000000..b139ea2e4
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/MathFunctions.h
@@ -0,0 +1,44 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020, Arm Limited and Contributors
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATH_FUNCTIONS_SVE_H
+#define EIGEN_MATH_FUNCTIONS_SVE_H
+
+namespace Eigen {
+namespace internal {
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf pexp<PacketXf>(const PacketXf& x) {
+ return pexp_float(x);
+}
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf plog<PacketXf>(const PacketXf& x) {
+ return plog_float(x);
+}
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf psin<PacketXf>(const PacketXf& x) {
+ return psin_float(x);
+}
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf pcos<PacketXf>(const PacketXf& x) {
+ return pcos_float(x);
+}
+
+// Hyperbolic Tangent function.
+template <>
+EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf ptanh<PacketXf>(const PacketXf& x) {
+ return internal::generic_fast_tanh_float(x);
+}
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_SVE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/PacketMath.h
new file mode 100644
index 000000000..9060b372f
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/PacketMath.h
@@ -0,0 +1,752 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020, Arm Limited and Contributors
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_SVE_H
+#define EIGEN_PACKET_MATH_SVE_H
+
+namespace Eigen
+{
+namespace internal
+{
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#endif
+
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
+
+template <typename Scalar, int SVEVectorLength>
+struct sve_packet_size_selector {
+ enum { size = SVEVectorLength / (sizeof(Scalar) * CHAR_BIT) };
+};
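+
+// For example, with EIGEN_ARM64_SVE_VL == 512 a float packet holds
+// 512 / (4 * 8) = 16 lanes, as does an int32 packet.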
+
+/********************************* int32 **************************************/
+typedef svint32_t PacketXi __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL)));
+
+template <>
+struct packet_traits<numext::int32_t> : default_packet_traits {
+ typedef PacketXi type;
+ typedef PacketXi half; // Half not implemented yet
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = sve_packet_size_selector<numext::int32_t, EIGEN_ARM64_SVE_VL>::size,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+ HasReduxp = 0 // Not implemented in SVE
+ };
+};
+
+template <>
+struct unpacket_traits<PacketXi> {
+ typedef numext::int32_t type;
+ typedef PacketXi half; // Half not yet implemented
+ enum {
+ size = sve_packet_size_selector<numext::int32_t, EIGEN_ARM64_SVE_VL>::size,
+ alignment = Aligned64,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<numext::int32_t>(const numext::int32_t* addr)
+{
+ svprfw(svptrue_b32(), addr, SV_PLDL1KEEP);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pset1<PacketXi>(const numext::int32_t& from)
+{
+ return svdup_n_s32(from);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi plset<PacketXi>(const numext::int32_t& a)
+{
+ numext::int32_t c[packet_traits<numext::int32_t>::size];
+ for (int i = 0; i < packet_traits<numext::int32_t>::size; i++) c[i] = i;
+ return svadd_s32_z(svptrue_b32(), pset1<PacketXi>(a), svld1_s32(svptrue_b32(), c));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi padd<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svadd_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi psub<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svsub_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pnegate(const PacketXi& a)
+{
+ return svneg_s32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pconj(const PacketXi& a)
+{
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pmul<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svmul_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pdiv<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svdiv_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pmadd(const PacketXi& a, const PacketXi& b, const PacketXi& c)
+{
+ return svmla_s32_z(svptrue_b32(), c, a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pmin<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svmin_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pmax<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svmax_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pcmp_le<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+  return svdup_n_s32_z(svcmple_s32(svptrue_b32(), a, b), 0xffffffffu);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pcmp_lt<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svdup_n_s32_z(svcmplt_s32(svptrue_b32(), a, b), 0xffffffffu);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pcmp_eq<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svdup_n_s32_z(svcmpeq_s32(svptrue_b32(), a, b), 0xffffffffu);
+}
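+
+// The integer comparisons above return an svbool predicate; svdup_n_s32_z then
+// expands it to all-ones (0xffffffff) in active lanes and zero elsewhere.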
+
+template <>
+EIGEN_STRONG_INLINE PacketXi ptrue<PacketXi>(const PacketXi& /*a*/)
+{
+ return svdup_n_s32_z(svptrue_b32(), 0xffffffffu);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pzero<PacketXi>(const PacketXi& /*a*/)
+{
+ return svdup_n_s32_z(svptrue_b32(), 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pand<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svand_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi por<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svorr_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pxor<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return sveor_s32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pandnot<PacketXi>(const PacketXi& a, const PacketXi& b)
+{
+ return svbic_s32_z(svptrue_b32(), a, b);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE PacketXi parithmetic_shift_right(PacketXi a)
+{
+ return svasrd_n_s32_z(svptrue_b32(), a, N);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE PacketXi plogical_shift_right(PacketXi a)
+{
+ return svreinterpret_s32_u32(svlsr_u32_z(svptrue_b32(), svreinterpret_u32_s32(a), svdup_n_u32_z(svptrue_b32(), N)));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE PacketXi plogical_shift_left(PacketXi a)
+{
+ return svlsl_s32_z(svptrue_b32(), a, svdup_n_u32_z(svptrue_b32(), N));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pload<PacketXi>(const numext::int32_t* from)
+{
+ EIGEN_DEBUG_ALIGNED_LOAD return svld1_s32(svptrue_b32(), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi ploadu<PacketXi>(const numext::int32_t* from)
+{
+ EIGEN_DEBUG_UNALIGNED_LOAD return svld1_s32(svptrue_b32(), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi ploaddup<PacketXi>(const numext::int32_t* from)
+{
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
+ return svld1_gather_u32index_s32(svptrue_b32(), from, indices);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi ploadquad<PacketXi>(const numext::int32_t* from)
+{
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...}
+ return svld1_gather_u32index_s32(svptrue_b32(), from, indices);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int32_t>(numext::int32_t* to, const PacketXi& from)
+{
+ EIGEN_DEBUG_ALIGNED_STORE svst1_s32(svptrue_b32(), to, from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int32_t>(numext::int32_t* to, const PacketXi& from)
+{
+ EIGEN_DEBUG_UNALIGNED_STORE svst1_s32(svptrue_b32(), to, from);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline PacketXi pgather<numext::int32_t, PacketXi>(const numext::int32_t* from, Index stride)
+{
+  // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
+ svint32_t indices = svindex_s32(0, stride);
+ return svld1_gather_s32index_s32(svptrue_b32(), from, indices);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int32_t, PacketXi>(numext::int32_t* to, const PacketXi& from, Index stride)
+{
+  // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
+ svint32_t indices = svindex_s32(0, stride);
+ svst1_scatter_s32index_s32(svptrue_b32(), to, indices, from);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t pfirst<PacketXi>(const PacketXi& a)
+{
+ // svlasta returns the first element if all predicate bits are 0
+ return svlasta_s32(svpfalse_b(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi preverse(const PacketXi& a)
+{
+ return svrev_s32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pabs(const PacketXi& a)
+{
+ return svabs_s32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux<PacketXi>(const PacketXi& a)
+{
+ return static_cast<numext::int32_t>(svaddv_s32(svptrue_b32(), a));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_mul<PacketXi>(const PacketXi& a)
+{
+ EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0),
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
+
+ // Multiply the vector by its reverse
+ svint32_t prod = svmul_s32_z(svptrue_b32(), a, svrev_s32(a));
+ svint32_t half_prod;
+
+  // Extract the high half of the vector. Depending on the VL, more reduction steps are needed.
+ if (EIGEN_ARM64_SVE_VL >= 2048) {
+ half_prod = svtbl_s32(prod, svindex_u32(32, 1));
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
+ }
+ if (EIGEN_ARM64_SVE_VL >= 1024) {
+ half_prod = svtbl_s32(prod, svindex_u32(16, 1));
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
+ }
+ if (EIGEN_ARM64_SVE_VL >= 512) {
+ half_prod = svtbl_s32(prod, svindex_u32(8, 1));
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
+ }
+ if (EIGEN_ARM64_SVE_VL >= 256) {
+ half_prod = svtbl_s32(prod, svindex_u32(4, 1));
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
+ }
+ // Last reduction
+ half_prod = svtbl_s32(prod, svindex_u32(2, 1));
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
+
+  // The full product has been reduced into the first element.
+ return pfirst<PacketXi>(prod);
+}
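+
+// Example for a 128-bit VL, i.e. four lanes {a0,a1,a2,a3}: prod starts as
+// {a0*a3, a1*a2, a2*a1, a3*a0}; the final svtbl/multiply step then leaves
+// a0*a1*a2*a3 in lane 0, which pfirst extracts.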
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_min<PacketXi>(const PacketXi& a)
+{
+ return svminv_s32(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_max<PacketXi>(const PacketXi& a)
+{
+ return svmaxv_s32(svptrue_b32(), a);
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXi, N>& kernel) {
+ int buffer[packet_traits<numext::int32_t>::size * N] = {0};
+ int i = 0;
+
+ PacketXi stride_index = svindex_s32(0, N);
+
+ for (i = 0; i < N; i++) {
+ svst1_scatter_s32index_s32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]);
+ }
+ for (i = 0; i < N; i++) {
+ kernel.packet[i] = svld1_s32(svptrue_b32(), buffer + i * packet_traits<numext::int32_t>::size);
+ }
+}
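+
+// The transpose goes through memory: lane j of kernel.packet[i] is scattered
+// to buffer[i + j*N], so each contiguous reload picks up one column of the
+// original block.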
+
+/********************************* float32 ************************************/
+
+typedef svfloat32_t PacketXf __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL)));
+
+template <>
+struct packet_traits<float> : default_packet_traits {
+ typedef PacketXf type;
+ typedef PacketXf half;
+
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = sve_packet_size_selector<float, EIGEN_ARM64_SVE_VL>::size,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasShift = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasArg = 0,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 0,
+ HasBlend = 0,
+ HasReduxp = 0, // Not implemented in SVE
+
+ HasDiv = 1,
+ HasFloor = 1,
+
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 0,
+ HasTanh = EIGEN_FAST_MATH,
+ HasErf = EIGEN_FAST_MATH
+ };
+};
+
+template <>
+struct unpacket_traits<PacketXf> {
+ typedef float type;
+ typedef PacketXf half; // Half not yet implemented
+ typedef PacketXi integer_packet;
+
+ enum {
+ size = sve_packet_size_selector<float, EIGEN_ARM64_SVE_VL>::size,
+ alignment = Aligned64,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+};
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pset1<PacketXf>(const float& from)
+{
+ return svdup_n_f32(from);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pset1frombits<PacketXf>(numext::uint32_t from)
+{
+ return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), from));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf plset<PacketXf>(const float& a)
+{
+ float c[packet_traits<float>::size];
+ for (int i = 0; i < packet_traits<float>::size; i++) c[i] = i;
+ return svadd_f32_z(svptrue_b32(), pset1<PacketXf>(a), svld1_f32(svptrue_b32(), c));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf padd<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svadd_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf psub<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svsub_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pnegate(const PacketXf& a)
+{
+ return svneg_f32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pconj(const PacketXf& a)
+{
+ return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmul<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svmul_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pdiv<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svdiv_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmadd(const PacketXf& a, const PacketXf& b, const PacketXf& c)
+{
+ return svmla_f32_z(svptrue_b32(), c, a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmin<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svmin_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmin<PropagateNaN, PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return pmin<PacketXf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmin<PropagateNumbers, PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svminnm_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmax<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svmax_f32_z(svptrue_b32(), a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmax<PropagateNaN, PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return pmax<PacketXf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pmax<PropagateNumbers, PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svmaxnm_f32_z(svptrue_b32(), a, b);
+}
+
+// Float comparisons in SVE return svbool (predicate). Use svdup to set active
+// lanes to 1 (0xffffffffu) and inactive lanes to 0.
+template <>
+EIGEN_STRONG_INLINE PacketXf pcmp_le<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+  return svreinterpret_f32_u32(svdup_n_u32_z(svcmple_f32(svptrue_b32(), a, b), 0xffffffffu));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pcmp_lt<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(svdup_n_u32_z(svcmplt_f32(svptrue_b32(), a, b), 0xffffffffu));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pcmp_eq<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(svdup_n_u32_z(svcmpeq_f32(svptrue_b32(), a, b), 0xffffffffu));
+}
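+
+// For example, pcmp_eq on a = {1.f, 2.f, ...} and b = {1.f, 3.f, ...} yields
+// lane bit patterns {0xffffffff, 0x00000000, ...} reinterpreted as float.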
+
+// Invert (svnot_b_z) the predicate resulting from the greater-than-or-equal
+// comparison (svcmpge_f32), then fill a float vector from the active lanes.
+template <>
+EIGEN_STRONG_INLINE PacketXf pcmp_lt_or_nan<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(svdup_n_u32_z(svnot_b_z(svptrue_b32(), svcmpge_f32(svptrue_b32(), a, b)), 0xffffffffu));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pfloor<PacketXf>(const PacketXf& a)
+{
+ return svrintm_f32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf ptrue<PacketXf>(const PacketXf& /*a*/)
+{
+ return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), 0xffffffffu));
+}
+
+// Logical Operations are not supported for float, so reinterpret casts
+template <>
+EIGEN_STRONG_INLINE PacketXf pand<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(svand_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf por<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(svorr_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pxor<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(sveor_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pandnot<PacketXf>(const PacketXf& a, const PacketXf& b)
+{
+ return svreinterpret_f32_u32(svbic_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pload<PacketXf>(const float* from)
+{
+ EIGEN_DEBUG_ALIGNED_LOAD return svld1_f32(svptrue_b32(), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf ploadu<PacketXf>(const float* from)
+{
+ EIGEN_DEBUG_UNALIGNED_LOAD return svld1_f32(svptrue_b32(), from);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf ploaddup<PacketXf>(const float* from)
+{
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
+ return svld1_gather_u32index_f32(svptrue_b32(), from, indices);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf ploadquad<PacketXf>(const float* from)
+{
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...}
+ return svld1_gather_u32index_f32(svptrue_b32(), from, indices);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const PacketXf& from)
+{
+ EIGEN_DEBUG_ALIGNED_STORE svst1_f32(svptrue_b32(), to, from);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const PacketXf& from)
+{
+ EIGEN_DEBUG_UNALIGNED_STORE svst1_f32(svptrue_b32(), to, from);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline PacketXf pgather<float, PacketXf>(const float* from, Index stride)
+{
+  // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
+ svint32_t indices = svindex_s32(0, stride);
+ return svld1_gather_s32index_f32(svptrue_b32(), from, indices);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, PacketXf>(float* to, const PacketXf& from, Index stride)
+{
+  // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
+ svint32_t indices = svindex_s32(0, stride);
+ svst1_scatter_s32index_f32(svptrue_b32(), to, indices, from);
+}
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<PacketXf>(const PacketXf& a)
+{
+ // svlasta returns the first element if all predicate bits are 0
+ return svlasta_f32(svpfalse_b(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf preverse(const PacketXf& a)
+{
+ return svrev_f32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pabs(const PacketXf& a)
+{
+ return svabs_f32_z(svptrue_b32(), a);
+}
+
+// TODO(tellenbach): Should this go into MathFunctions.h? If so, change for
+// all vector extensions and the generic version.
+template <>
+EIGEN_STRONG_INLINE PacketXf pfrexp<PacketXf>(const PacketXf& a, PacketXf& exponent)
+{
+ return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux<PacketXf>(const PacketXf& a)
+{
+ return svaddv_f32(svptrue_b32(), a);
+}
+
+// Other reduction functions:
+// mul
+// Only works for SVE vector lengths that are a multiple of 128 bits
+template <>
+EIGEN_STRONG_INLINE float predux_mul<PacketXf>(const PacketXf& a)
+{
+ EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0),
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
+ // Multiply the vector by its reverse
+ svfloat32_t prod = svmul_f32_z(svptrue_b32(), a, svrev_f32(a));
+ svfloat32_t half_prod;
+
+  // Extract the high half of the vector. Depending on the VL, more reduction steps are needed.
+ if (EIGEN_ARM64_SVE_VL >= 2048) {
+ half_prod = svtbl_f32(prod, svindex_u32(32, 1));
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
+ }
+ if (EIGEN_ARM64_SVE_VL >= 1024) {
+ half_prod = svtbl_f32(prod, svindex_u32(16, 1));
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
+ }
+ if (EIGEN_ARM64_SVE_VL >= 512) {
+ half_prod = svtbl_f32(prod, svindex_u32(8, 1));
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
+ }
+ if (EIGEN_ARM64_SVE_VL >= 256) {
+ half_prod = svtbl_f32(prod, svindex_u32(4, 1));
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
+ }
+ // Last reduction
+ half_prod = svtbl_f32(prod, svindex_u32(2, 1));
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
+
+  // The full product has been reduced into the first element.
+ return pfirst<PacketXf>(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PacketXf>(const PacketXf& a)
+{
+ return svminv_f32(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PacketXf>(const PacketXf& a)
+{
+ return svmaxv_f32(svptrue_b32(), a);
+}
+
+template<int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXf, N>& kernel)
+{
+ float buffer[packet_traits<float>::size * N] = {0};
+ int i = 0;
+
+ PacketXi stride_index = svindex_s32(0, N);
+
+ for (i = 0; i < N; i++) {
+ svst1_scatter_s32index_f32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]);
+ }
+
+ for (i = 0; i < N; i++) {
+ kernel.packet[i] = svld1_f32(svptrue_b32(), buffer + i * packet_traits<float>::size);
+ }
+}
+
+template<>
+EIGEN_STRONG_INLINE PacketXf pldexp<PacketXf>(const PacketXf& a, const PacketXf& exponent)
+{
+ return pldexp_generic(a, exponent);
+}
+
+} // namespace internal
+} // namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_SVE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/TypeCasting.h
new file mode 100644
index 000000000..7ba5d9cd1
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SVE/TypeCasting.h
@@ -0,0 +1,49 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020, Arm Limited and Contributors
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_SVE_H
+#define EIGEN_TYPE_CASTING_SVE_H
+
+namespace Eigen {
+namespace internal {
+
+template <>
+struct type_casting_traits<float, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+struct type_casting_traits<numext::int32_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pcast<PacketXi, PacketXf>(const PacketXi& a) {
+ return svcvt_f32_s32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pcast<PacketXf, PacketXi>(const PacketXf& a) {
+ return svcvt_s32_f32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf preinterpret<PacketXf, PacketXi>(const PacketXi& a) {
+ return svreinterpret_f32_s32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi preinterpret<PacketXi, PacketXf>(const PacketXf& a) {
+ return svreinterpret_s32_f32(a);
+}
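+
+// Note the difference between the two families above: pcast converts values
+// (svcvt truncates toward zero in the float-to-int direction), whereas
+// preinterpret only reinterprets the underlying bits without changing them.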
+
+} // namespace internal
+} // namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_SVE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/InteropHeaders.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/InteropHeaders.h
new file mode 100644
index 000000000..10856ff5e
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/InteropHeaders.h
@@ -0,0 +1,232 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * InteropHeaders.h
+ *
+ * \brief:
+ * InteropHeaders
+ *
+ *****************************************************************/
+
+#ifndef EIGEN_INTEROP_HEADERS_SYCL_H
+#define EIGEN_INTEROP_HEADERS_SYCL_H
+
+namespace Eigen {
+
+#if !defined(EIGEN_DONT_VECTORIZE_SYCL)
+
+namespace internal {
+
+template <int has_blend, int lengths>
+struct sycl_packet_traits : default_packet_traits {
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = lengths,
+ HasHalfPacket = 0,
+ HasDiv = 1,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasSin = 1,
+ HasCos = 1,
+ HasTan = 1,
+ HasASin = 1,
+ HasACos = 1,
+ HasATan = 1,
+ HasSinh = 1,
+ HasCosh = 1,
+ HasTanh = 1,
+ HasLGamma = 0,
+ HasDiGamma = 0,
+ HasZeta = 0,
+ HasPolygamma = 0,
+ HasErf = 0,
+ HasErfc = 0,
+ HasNdtri = 0,
+ HasIGamma = 0,
+ HasIGammac = 0,
+ HasBetaInc = 0,
+ HasBlend = has_blend,
+ // This flag is used to indicate whether packet comparison is supported.
+ // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true.
+ HasCmp = 1,
+ HasMax = 1,
+ HasMin = 1,
+ HasMul = 1,
+ HasAdd = 1,
+ HasFloor = 1,
+ HasRound = 1,
+ HasRint = 1,
+ HasLog1p = 1,
+ HasExpm1 = 1,
+ HasCeil = 1,
+ };
+};
+
+#ifdef SYCL_DEVICE_ONLY
+#define SYCL_PACKET_TRAITS(packet_type, has_blend, unpacket_type, lengths) \
+ template <> \
+ struct packet_traits<unpacket_type> \
+ : sycl_packet_traits<has_blend, lengths> { \
+ typedef packet_type type; \
+ typedef packet_type half; \
+ };
+
+SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, float, 4)
+SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, const float, 4)
+SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, double, 2)
+SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, const double, 2)
+#undef SYCL_PACKET_TRAITS
+
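+// For illustration, the float instantiation above expands to:
+//   template <> struct packet_traits<float> : sycl_packet_traits<1, 4> {
+//     typedef cl::sycl::cl_float4 type;
+//     typedef cl::sycl::cl_float4 half;
+//   };
+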
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these packet_traits definitions and the ones
+// we'll use on the host side (SSE, AVX, ...)
+#define SYCL_ARITHMETIC(packet_type) \
+ template <> \
+ struct is_arithmetic<packet_type> { \
+ enum { value = true }; \
+ };
+SYCL_ARITHMETIC(cl::sycl::cl_float4)
+SYCL_ARITHMETIC(cl::sycl::cl_double2)
+#undef SYCL_ARITHMETIC
+
+#define SYCL_UNPACKET_TRAITS(packet_type, unpacket_type, lengths) \
+ template <> \
+ struct unpacket_traits<packet_type> { \
+ typedef unpacket_type type; \
+ enum { size = lengths, vectorizable = true, alignment = Aligned16 }; \
+ typedef packet_type half; \
+ };
+SYCL_UNPACKET_TRAITS(cl::sycl::cl_float4, float, 4)
+SYCL_UNPACKET_TRAITS(cl::sycl::cl_double2, double, 2)
+
+#undef SYCL_UNPACKET_TRAITS
+#endif
+
+} // end namespace internal
+
+#endif
+
+namespace TensorSycl {
+namespace internal {
+
+template <typename PacketReturnType, int PacketSize>
+struct PacketWrapper;
+// These functions should never get called on the device.
+#ifndef SYCL_DEVICE_ONLY
+template <typename PacketReturnType, int PacketSize>
+struct PacketWrapper {
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
+ Scalar;
+ template <typename Index>
+ EIGEN_DEVICE_FUNC static Scalar scalarize(Index, PacketReturnType &) {
+ eigen_assert(false && "THERE IS NO PACKETIZE VERSION FOR THE CHOSEN TYPE");
+ abort();
+ }
+ EIGEN_DEVICE_FUNC static PacketReturnType convert_to_packet_type(Scalar in,
+ Scalar) {
+ return ::Eigen::internal::template plset<PacketReturnType>(in);
+ }
+ EIGEN_DEVICE_FUNC static void set_packet(PacketReturnType, Scalar *) {
+ eigen_assert(false && "THERE IS NO PACKETIZE VERSION FOR THE CHOSEN TYPE");
+ abort();
+ }
+};
+
+#elif defined(SYCL_DEVICE_ONLY)
+template <typename PacketReturnType>
+struct PacketWrapper<PacketReturnType, 4> {
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
+ Scalar;
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index index, PacketReturnType &in) {
+ switch (index) {
+ case 0:
+ return in.x();
+ case 1:
+ return in.y();
+ case 2:
+ return in.z();
+ case 3:
+ return in.w();
+ default:
+ // INDEX MUST BE BETWEEN 0 AND 3. There is no abort function in a SYCL
+ // kernel, so we cannot use abort here.
+ // The code will never reach this point.
+ __builtin_unreachable();
+ }
+ __builtin_unreachable();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(
+ Scalar in, Scalar other) {
+ return PacketReturnType(in, other, other, other);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) {
+ lhs = PacketReturnType(rhs[0], rhs[1], rhs[2], rhs[3]);
+ }
+};
+
+template <typename PacketReturnType>
+struct PacketWrapper<PacketReturnType, 1> {
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
+ Scalar;
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index, PacketReturnType &in) {
+ return in;
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(Scalar in,
+ Scalar) {
+ return PacketReturnType(in);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) {
+ lhs = rhs[0];
+ }
+};
+
+template <typename PacketReturnType>
+struct PacketWrapper<PacketReturnType, 2> {
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
+ Scalar;
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index index, PacketReturnType &in) {
+ switch (index) {
+ case 0:
+ return in.x();
+ case 1:
+ return in.y();
+ default:
+ // INDEX MUST BE BETWEEN 0 AND 1. There is no abort function in a SYCL
+ // kernel, so we cannot use abort here.
+ // The code will never reach this point.
+ __builtin_unreachable();
+ }
+ __builtin_unreachable();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(
+ Scalar in, Scalar other) {
+ return PacketReturnType(in, other);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) {
+ lhs = PacketReturnType(rhs[0], rhs[1]);
+ }
+};
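+
+// Minimal usage sketch (names are illustrative, not part of this header):
+// given a cl::sycl::cl_float4 packet p,
+//   PacketWrapper<cl::sycl::cl_float4, 4>::scalarize(2, p) returns p.z(), and
+//   PacketWrapper<cl::sycl::cl_float4, 4>::convert_to_packet_type(x, y)
+// builds cl::sycl::cl_float4(x, y, y, y).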
+
+#endif
+
+} // end namespace internal
+} // end namespace TensorSycl
+} // end namespace Eigen
+
+#endif // EIGEN_INTEROP_HEADERS_SYCL_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/MathFunctions.h
new file mode 100644
index 000000000..2ab0f2a76
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/MathFunctions.h
@@ -0,0 +1,301 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * MathFunctions.h
+ *
+ * \brief:
+ * MathFunctions
+ *
+ *****************************************************************/
+
+#ifndef EIGEN_MATH_FUNCTIONS_SYCL_H
+#define EIGEN_MATH_FUNCTIONS_SYCL_H
+namespace Eigen {
+
+namespace internal {
+
+// Make sure this is only available when targeting a GPU: we don't want to
+// introduce conflicts between these specializations and the ones we'll use
+// on the host side (SSE, AVX, ...)
+#if defined(SYCL_DEVICE_ONLY)
+#define SYCL_PLOG(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::log(a); \
+ }
+
+SYCL_PLOG(cl::sycl::cl_float4)
+SYCL_PLOG(cl::sycl::cl_double2)
+#undef SYCL_PLOG
+
+#define SYCL_PLOG1P(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog1p<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::log1p(a); \
+ }
+
+SYCL_PLOG1P(cl::sycl::cl_float4)
+SYCL_PLOG1P(cl::sycl::cl_double2)
+#undef SYCL_PLOG1P
+
+#define SYCL_PLOG10(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog10<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::log10(a); \
+ }
+
+SYCL_PLOG10(cl::sycl::cl_float4)
+SYCL_PLOG10(cl::sycl::cl_double2)
+#undef SYCL_PLOG10
+
+#define SYCL_PEXP(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pexp<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::exp(a); \
+ }
+
+SYCL_PEXP(cl::sycl::cl_float4)
+SYCL_PEXP(cl::sycl::cl_float)
+SYCL_PEXP(cl::sycl::cl_double2)
+#undef SYCL_PEXP
+
+#define SYCL_PEXPM1(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pexpm1<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::expm1(a); \
+ }
+
+SYCL_PEXPM1(cl::sycl::cl_float4)
+SYCL_PEXPM1(cl::sycl::cl_double2)
+#undef SYCL_PEXPM1
+
+#define SYCL_PSQRT(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psqrt<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::sqrt(a); \
+ }
+
+SYCL_PSQRT(cl::sycl::cl_float4)
+SYCL_PSQRT(cl::sycl::cl_double2)
+#undef SYCL_PSQRT
+
+#define SYCL_PRSQRT(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type prsqrt<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::rsqrt(a); \
+ }
+
+SYCL_PRSQRT(cl::sycl::cl_float4)
+SYCL_PRSQRT(cl::sycl::cl_double2)
+#undef SYCL_PRSQRT
+
+/** \internal \returns the sine of \a a (coeff-wise) */
+#define SYCL_PSIN(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psin<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::sin(a); \
+ }
+
+SYCL_PSIN(cl::sycl::cl_float4)
+SYCL_PSIN(cl::sycl::cl_double2)
+#undef SYCL_PSIN
+
+/** \internal \returns the cosine of \a a (coeff-wise) */
+#define SYCL_PCOS(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pcos<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::cos(a); \
+ }
+
+SYCL_PCOS(cl::sycl::cl_float4)
+SYCL_PCOS(cl::sycl::cl_double2)
+#undef SYCL_PCOS
+
+/** \internal \returns the tangent of \a a (coeff-wise) */
+#define SYCL_PTAN(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ptan<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::tan(a); \
+ }
+
+SYCL_PTAN(cl::sycl::cl_float4)
+SYCL_PTAN(cl::sycl::cl_double2)
+#undef SYCL_PTAN
+
+/** \internal \returns the arc sine of \a a (coeff-wise) */
+#define SYCL_PASIN(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pasin<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::asin(a); \
+ }
+
+SYCL_PASIN(cl::sycl::cl_float4)
+SYCL_PASIN(cl::sycl::cl_double2)
+#undef SYCL_PASIN
+
+/** \internal \returns the arc cosine of \a a (coeff-wise) */
+#define SYCL_PACOS(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pacos<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::acos(a); \
+ }
+
+SYCL_PACOS(cl::sycl::cl_float4)
+SYCL_PACOS(cl::sycl::cl_double2)
+#undef SYCL_PACOS
+
+/** \internal \returns the arc tangent of \a a (coeff-wise) */
+#define SYCL_PATAN(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type patan<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::atan(a); \
+ }
+
+SYCL_PATAN(cl::sycl::cl_float4)
+SYCL_PATAN(cl::sycl::cl_double2)
+#undef SYCL_PATAN
+
+/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
+#define SYCL_PSINH(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psinh<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::sinh(a); \
+ }
+
+SYCL_PSINH(cl::sycl::cl_float4)
+SYCL_PSINH(cl::sycl::cl_double2)
+#undef SYCL_PSINH
+
+/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
+#define SYCL_PCOSH(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pcosh<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::cosh(a); \
+ }
+
+SYCL_PCOSH(cl::sycl::cl_float4)
+SYCL_PCOSH(cl::sycl::cl_double2)
+#undef SYCL_PCOSH
+
+/** \internal \returns the hyperbolic tangent of \a a (coeff-wise) */
+#define SYCL_PTANH(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ptanh<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::tanh(a); \
+ }
+
+SYCL_PTANH(cl::sycl::cl_float4)
+SYCL_PTANH(cl::sycl::cl_double2)
+#undef SYCL_PTANH
+
+#define SYCL_PCEIL(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pceil<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::ceil(a); \
+ }
+
+SYCL_PCEIL(cl::sycl::cl_float4)
+SYCL_PCEIL(cl::sycl::cl_double2)
+#undef SYCL_PCEIL
+
+#define SYCL_PROUND(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pround<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::round(a); \
+ }
+
+SYCL_PROUND(cl::sycl::cl_float4)
+SYCL_PROUND(cl::sycl::cl_double2)
+#undef SYCL_PROUND
+
+#define SYCL_PRINT(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type print<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::rint(a); \
+ }
+
+SYCL_PRINT(cl::sycl::cl_float4)
+SYCL_PRINT(cl::sycl::cl_double2)
+#undef SYCL_PRINT
+
+#define SYCL_FLOOR(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pfloor<packet_type>( \
+ const packet_type& a) { \
+ return cl::sycl::floor(a); \
+ }
+
+SYCL_FLOOR(cl::sycl::cl_float4)
+SYCL_FLOOR(cl::sycl::cl_double2)
+#undef SYCL_FLOOR
+
+#define SYCL_PMIN(packet_type, expr) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pmin<packet_type>( \
+ const packet_type& a, const packet_type& b) { \
+ return expr; \
+ }
+
+SYCL_PMIN(cl::sycl::cl_float4, cl::sycl::fmin(a, b))
+SYCL_PMIN(cl::sycl::cl_double2, cl::sycl::fmin(a, b))
+#undef SYCL_PMIN
+
+#define SYCL_PMAX(packet_type, expr) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pmax<packet_type>( \
+ const packet_type& a, const packet_type& b) { \
+ return expr; \
+ }
+
+SYCL_PMAX(cl::sycl::cl_float4, cl::sycl::fmax(a, b))
+SYCL_PMAX(cl::sycl::cl_double2, cl::sycl::fmax(a, b))
+#undef SYCL_PMAX
+
+#define SYCL_PLDEXP(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pldexp( \
+ const packet_type& a, const packet_type& exponent) { \
+ return cl::sycl::ldexp( \
+ a, exponent.template convert<cl::sycl::cl_int, \
+ cl::sycl::rounding_mode::automatic>()); \
+ }
+
+SYCL_PLDEXP(cl::sycl::cl_float4)
+SYCL_PLDEXP(cl::sycl::cl_double2)
+#undef SYCL_PLDEXP
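+
+// Note: cl::sycl::ldexp expects an integer exponent vector, so pldexp first
+// converts the floating-point exponent packet to cl_int lanes before
+// forwarding.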
+
+#endif
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_SYCL_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/PacketMath.h
new file mode 100644
index 000000000..87badc076
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/PacketMath.h
@@ -0,0 +1,670 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * PacketMath.h
+ *
+ * \brief:
+ * PacketMath
+ *
+ *****************************************************************/
+
+#ifndef EIGEN_PACKET_MATH_SYCL_H
+#define EIGEN_PACKET_MATH_SYCL_H
+#include <type_traits>
+namespace Eigen {
+
+namespace internal {
+#ifdef SYCL_DEVICE_ONLY
+
+#define SYCL_PLOADT_RO(address_space_target) \
+ template <typename packet_type, int Alignment> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt_ro( \
+ typename cl::sycl::multi_ptr< \
+ const typename unpacket_traits<packet_type>::type, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ from) { \
+ typedef typename unpacket_traits<packet_type>::type scalar; \
+ typedef cl::sycl::multi_ptr< \
+ scalar, cl::sycl::access::address_space::address_space_target> \
+ multi_ptr; \
+ auto res = packet_type( \
+ static_cast<typename unpacket_traits<packet_type>::type>(0)); \
+ res.load(0, multi_ptr(const_cast<typename multi_ptr::pointer_t>(from))); \
+ return res; \
+ }
+
+SYCL_PLOADT_RO(global_space)
+SYCL_PLOADT_RO(local_space)
+#undef SYCL_PLOADT_RO
+#endif
+
+template <typename packet_type, int Alignment, typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type
+ploadt_ro(const Eigen::TensorSycl::internal::RangeAccess<
+ cl::sycl::access::mode::read_write, T>& from) {
+ return ploadt_ro<packet_type, Alignment>(from.get_pointer());
+}
+
+#ifdef SYCL_DEVICE_ONLY
+#define SYCL_PLOAD(address_space_target, Alignment, AlignedType) \
+ template <typename packet_type> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##AlignedType( \
+ typename cl::sycl::multi_ptr< \
+ const typename unpacket_traits<packet_type>::type, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ from) { \
+ return ploadt_ro<packet_type, Alignment>(from); \
+ }
+
+// global space
+SYCL_PLOAD(global_space, Unaligned, u)
+SYCL_PLOAD(global_space, Aligned, )
+// local space
+SYCL_PLOAD(local_space, Unaligned, u)
+SYCL_PLOAD(local_space, Aligned, )
+
+#undef SYCL_PLOAD
+#endif
+
+#define SYCL_PLOAD(Alignment, AlignedType) \
+ template <typename packet_type> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##AlignedType( \
+ const Eigen::TensorSycl::internal::RangeAccess< \
+ cl::sycl::access::mode::read_write, \
+ typename unpacket_traits<packet_type>::type> \
+ from) { \
+ return ploadt_ro<packet_type, Alignment>(from); \
+ }
+SYCL_PLOAD(Unaligned, u)
+SYCL_PLOAD(Aligned, )
+#undef SYCL_PLOAD
+
+#ifdef SYCL_DEVICE_ONLY
+/** \internal \returns a packet version of \a *from.
+ * The pointer \a from must be aligned on a boundary of \a Alignment bytes. */
+#define SYCL_PLOADT(address_space_target) \
+ template <typename packet_type, int Alignment> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt( \
+ typename cl::sycl::multi_ptr< \
+ const typename unpacket_traits<packet_type>::type, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ from) { \
+ if (Alignment >= unpacket_traits<packet_type>::alignment) \
+ return pload<packet_type>(from); \
+ else \
+ return ploadu<packet_type>(from); \
+ }
+
+// global space
+SYCL_PLOADT(global_space)
+// local space
+SYCL_PLOADT(local_space)
+#undef SYCL_PLOADT
+#endif
+
+template <typename packet_type, int Alignment>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type
+ploadt(const Eigen::TensorSycl::internal::RangeAccess<
+ cl::sycl::access::mode::read_write,
+ typename unpacket_traits<packet_type>::type>& from) {
+ return ploadt<packet_type, Alignment>(from.get_pointer());
+}
+#ifdef SYCL_DEVICE_ONLY
+
+// private_space
+#define SYCL_PLOADT_RO_SPECIAL(packet_type, Alignment) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type \
+ ploadt_ro<packet_type, Alignment>( \
+ const typename unpacket_traits<packet_type>::type* from) { \
+ typedef typename unpacket_traits<packet_type>::type scalar; \
+ auto res = packet_type(static_cast<scalar>(0)); \
+ res.template load<cl::sycl::access::address_space::private_space>( \
+ 0, const_cast<scalar*>(from)); \
+ return res; \
+ }
+
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Aligned)
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Aligned)
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Unaligned)
+SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Unaligned)
+
+#define SYCL_PLOAD_SPECIAL(packet_type, alignment_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##alignment_type( \
+ const typename unpacket_traits<packet_type>::type* from) { \
+ typedef typename unpacket_traits<packet_type>::type scalar; \
+ auto res = packet_type(static_cast<scalar>(0)); \
+ res.template load<cl::sycl::access::address_space::private_space>( \
+ 0, const_cast<scalar*>(from)); \
+ return res; \
+ }
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, )
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, )
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, u)
+SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, u)
+
+#undef SYCL_PLOAD_SPECIAL
+
+#define SYCL_PSTORE(scalar, packet_type, address_space_target, alignment) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \
+ typename cl::sycl::multi_ptr< \
+ scalar, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ to, \
+ const packet_type& from) { \
+ typedef cl::sycl::multi_ptr< \
+ scalar, cl::sycl::access::address_space::address_space_target> \
+ multi_ptr; \
+ from.store(0, multi_ptr(to)); \
+ }
+
+// global space
+SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, )
+SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, u)
+SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, )
+SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, u)
+SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, )
+SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, u)
+SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, )
+SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, u)
+
+SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, )
+SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, u)
+SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, )
+SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, u)
+#undef SYCL_PSTORE
+
+#define SYCL_PSTORE_T(address_space_target) \
+ template <typename scalar, typename packet_type, int Alignment> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret( \
+ typename cl::sycl::multi_ptr< \
+ scalar, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ to, \
+ const packet_type& from) { \
+ if (Alignment) \
+ pstore(to, from); \
+ else \
+ pstoreu(to, from); \
+ }
+
+SYCL_PSTORE_T(global_space)
+
+SYCL_PSTORE_T(local_space)
+
+#undef SYCL_PSTORE_T
+
+#define SYCL_PSET1(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pset1<packet_type>( \
+ const typename unpacket_traits<packet_type>::type& from) { \
+ return packet_type(from); \
+ }
+
+// global space
+SYCL_PSET1(cl::sycl::cl_float4)
+SYCL_PSET1(cl::sycl::cl_double2)
+
+#undef SYCL_PSET1
+
+template <typename packet_type>
+struct get_base_packet {
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type
+ get_ploaddup(sycl_multi_pointer) {}
+
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type
+ get_pgather(sycl_multi_pointer, Index) {}
+};
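+
+// The empty primary-template stubs above are placeholders; only the
+// cl_float4 and cl_double2 specializations below are meant to be used.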
+
+template <>
+struct get_base_packet<cl::sycl::cl_float4> {
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_ploaddup(
+ sycl_multi_pointer from) {
+ return cl::sycl::cl_float4(from[0], from[0], from[1], from[1]);
+ }
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_pgather(
+ sycl_multi_pointer from, Index stride) {
+ return cl::sycl::cl_float4(from[0 * stride], from[1 * stride],
+ from[2 * stride], from[3 * stride]);
+ }
+
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter(
+ sycl_multi_pointer to, const cl::sycl::cl_float4& from, Index stride) {
+ auto tmp = stride;
+ to[0] = from.x();
+ to[tmp] = from.y();
+ to[tmp += stride] = from.z();
+ to[tmp += stride] = from.w();
+ }
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 set_plset(
+ const float& a) {
+ return cl::sycl::cl_float4(static_cast<float>(a), static_cast<float>(a + 1),
+ static_cast<float>(a + 2),
+ static_cast<float>(a + 3));
+ }
+};
+
+template <>
+struct get_base_packet<cl::sycl::cl_double2> {
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2
+ get_ploaddup(const sycl_multi_pointer from) {
+ return cl::sycl::cl_double2(from[0], from[0]);
+ }
+
+ template <typename sycl_multi_pointer, typename Index>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 get_pgather(
+ const sycl_multi_pointer from, Index stride) {
+ return cl::sycl::cl_double2(from[0 * stride], from[1 * stride]);
+ }
+
+ template <typename sycl_multi_pointer>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter(
+ sycl_multi_pointer to, const cl::sycl::cl_double2& from, Index stride) {
+ to[0] = from.x();
+ to[stride] = from.y();
+ }
+
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 set_plset(
+ const double& a) {
+ return cl::sycl::cl_double2(static_cast<double>(a),
+ static_cast<double>(a + 1));
+ }
+};
+
+#define SYCL_PLOAD_DUP(address_space_target) \
+ template <typename packet_type> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ploaddup( \
+ typename cl::sycl::multi_ptr< \
+ const typename unpacket_traits<packet_type>::type, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ from) { \
+ return get_base_packet<packet_type>::get_ploaddup(from); \
+ }
+
+// global space
+SYCL_PLOAD_DUP(global_space)
+// local_space
+SYCL_PLOAD_DUP(local_space)
+#undef SYCL_PLOAD_DUP
+
+#define SYCL_PLOAD_DUP_SPECILIZE(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ploaddup<packet_type>( \
+ const typename unpacket_traits<packet_type>::type* from) { \
+ return get_base_packet<packet_type>::get_ploaddup(from); \
+ }
+
+SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_float4)
+SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_double2)
+
+#undef SYCL_PLOAD_DUP_SPECILIZE
+
+#define SYCL_PLSET(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type plset<packet_type>( \
+ const typename unpacket_traits<packet_type>::type& a) { \
+ return get_base_packet<packet_type>::set_plset(a); \
+ }
+
+SYCL_PLSET(cl::sycl::cl_float4)
+SYCL_PLSET(cl::sycl::cl_double2)
+
+#undef SYCL_PLSET
+
+#define SYCL_PGATHER(address_space_target) \
+ template <typename Scalar, typename packet_type> \
+ EIGEN_DEVICE_FUNC inline packet_type pgather( \
+ typename cl::sycl::multi_ptr< \
+ const typename unpacket_traits<packet_type>::type, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ from, \
+ Index stride) { \
+ return get_base_packet<packet_type>::get_pgather(from, stride); \
+ }
+
+// global space
+SYCL_PGATHER(global_space)
+// local space
+SYCL_PGATHER(local_space)
+
+#undef SYCL_PGATHER
+
+#define SYCL_PGATHER_SPECILIZE(scalar, packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type \
+ pgather<scalar, packet_type>( \
+ const typename unpacket_traits<packet_type>::type* from, Index stride) { \
+ return get_base_packet<packet_type>::get_pgather(from, stride); \
+ }
+
+SYCL_PGATHER_SPECILIZE(float, cl::sycl::cl_float4)
+SYCL_PGATHER_SPECILIZE(double, cl::sycl::cl_double2)
+
+#undef SYCL_PGATHER_SPECILIZE
+
+#define SYCL_PSCATTER(address_space_target) \
+ template <typename Scalar, typename packet_type> \
+ EIGEN_DEVICE_FUNC inline void pscatter( \
+ typename cl::sycl::multi_ptr< \
+ typename unpacket_traits<packet_type>::type, \
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
+ to, \
+ const packet_type& from, Index stride) { \
+ get_base_packet<packet_type>::set_pscatter(to, from, stride); \
+ }
+
+// global space
+SYCL_PSCATTER(global_space)
+// local space
+SYCL_PSCATTER(local_space)
+
+#undef SYCL_PSCATTER
+
+#define SYCL_PSCATTER_SPECILIZE(scalar, packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<scalar, packet_type>( \
+ typename unpacket_traits<packet_type>::type * to, \
+ const packet_type& from, Index stride) { \
+ get_base_packet<packet_type>::set_pscatter(to, from, stride); \
+ }
+
+SYCL_PSCATTER_SPECILIZE(float, cl::sycl::cl_float4)
+SYCL_PSCATTER_SPECILIZE(double, cl::sycl::cl_double2)
+
+#undef SYCL_PSCATTER_SPECILIZE
+
+#define SYCL_PMAD(packet_type) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pmadd( \
+ const packet_type& a, const packet_type& b, const packet_type& c) { \
+ return cl::sycl::mad(a, b, c); \
+ }
+
+SYCL_PMAD(cl::sycl::cl_float4)
+SYCL_PMAD(cl::sycl::cl_double2)
+#undef SYCL_PMAD
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float pfirst<cl::sycl::cl_float4>(
+ const cl::sycl::cl_float4& a) {
+ return a.x();
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double pfirst<cl::sycl::cl_double2>(
+ const cl::sycl::cl_double2& a) {
+ return a.x();
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux<cl::sycl::cl_float4>(
+ const cl::sycl::cl_float4& a) {
+ return a.x() + a.y() + a.z() + a.w();
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux<cl::sycl::cl_double2>(
+ const cl::sycl::cl_double2& a) {
+ return a.x() + a.y();
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_max<cl::sycl::cl_float4>(
+ const cl::sycl::cl_float4& a) {
+ return cl::sycl::fmax(cl::sycl::fmax(a.x(), a.y()),
+ cl::sycl::fmax(a.z(), a.w()));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_max<cl::sycl::cl_double2>(
+ const cl::sycl::cl_double2& a) {
+ return cl::sycl::fmax(a.x(), a.y());
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_min<cl::sycl::cl_float4>(
+ const cl::sycl::cl_float4& a) {
+ return cl::sycl::fmin(cl::sycl::fmin(a.x(), a.y()),
+ cl::sycl::fmin(a.z(), a.w()));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_min<cl::sycl::cl_double2>(
+ const cl::sycl::cl_double2& a) {
+ return cl::sycl::fmin(a.x(), a.y());
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_mul<cl::sycl::cl_float4>(
+ const cl::sycl::cl_float4& a) {
+ return a.x() * a.y() * a.z() * a.w();
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_mul<cl::sycl::cl_double2>(
+ const cl::sycl::cl_double2& a) {
+ return a.x() * a.y();
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
+pabs<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
+ return cl::sycl::cl_float4(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()),
+ cl::sycl::fabs(a.z()), cl::sycl::fabs(a.w()));
+}
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2
+pabs<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
+ return cl::sycl::cl_double2(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()));
+}
+
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_le(const Packet &a,
+ const Packet &b) {
+ return ((a <= b)
+ .template convert<typename unpacket_traits<Packet>::type,
+ cl::sycl::rounding_mode::automatic>());
+}
+
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_lt(const Packet &a,
+ const Packet &b) {
+ return ((a < b)
+ .template convert<typename unpacket_traits<Packet>::type,
+ cl::sycl::rounding_mode::automatic>());
+}
+
+template <typename Packet>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_eq(const Packet &a,
+ const Packet &b) {
+ return ((a == b)
+ .template convert<typename unpacket_traits<Packet>::type,
+ cl::sycl::rounding_mode::automatic>());
+}
+
+#define SYCL_PCMP(OP, TYPE) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TYPE pcmp_##OP<TYPE>(const TYPE &a, \
+ const TYPE &b) { \
+ return sycl_pcmp_##OP<TYPE>(a, b); \
+ }
+
+SYCL_PCMP(le, cl::sycl::cl_float4)
+SYCL_PCMP(lt, cl::sycl::cl_float4)
+SYCL_PCMP(eq, cl::sycl::cl_float4)
+SYCL_PCMP(le, cl::sycl::cl_double2)
+SYCL_PCMP(lt, cl::sycl::cl_double2)
+SYCL_PCMP(eq, cl::sycl::cl_double2)
+#undef SYCL_PCMP
+
+template <typename T> struct convert_to_integer;
+
+template <> struct convert_to_integer<float> {
+ using type = std::int32_t;
+ using packet_type = cl::sycl::cl_int4;
+};
+template <> struct convert_to_integer<double> {
+ using type = std::int64_t;
+ using packet_type = cl::sycl::cl_long2;
+};
+
+template <typename PacketIn>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename convert_to_integer<
+ typename unpacket_traits<PacketIn>::type>::packet_type
+vector_as_int(const PacketIn &p) {
+ return (
+ p.template convert<typename convert_to_integer<
+ typename unpacket_traits<PacketIn>::type>::type,
+ cl::sycl::rounding_mode::automatic>());
+}
+
+template <typename packetOut, typename PacketIn>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packetOut
+convert_vector(const PacketIn &p) {
+ return (p.template convert<typename unpacket_traits<packetOut>::type,
+ cl::sycl::rounding_mode::automatic>());
+}
+
+#define SYCL_PAND(TYPE) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pand<TYPE>(const TYPE &a, \
+ const TYPE &b) { \
+ return convert_vector<TYPE>(vector_as_int(a) & vector_as_int(b)); \
+ }
+SYCL_PAND(cl::sycl::cl_float4)
+SYCL_PAND(cl::sycl::cl_double2)
+#undef SYCL_PAND
+
+#define SYCL_POR(TYPE) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE por<TYPE>(const TYPE &a, \
+ const TYPE &b) { \
+ return convert_vector<TYPE>(vector_as_int(a) | vector_as_int(b)); \
+ }
+
+SYCL_POR(cl::sycl::cl_float4)
+SYCL_POR(cl::sycl::cl_double2)
+#undef SYCL_POR
+
+#define SYCL_PXOR(TYPE) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pxor<TYPE>(const TYPE &a, \
+ const TYPE &b) { \
+ return convert_vector<TYPE>(vector_as_int(a) ^ vector_as_int(b)); \
+ }
+
+SYCL_PXOR(cl::sycl::cl_float4)
+SYCL_PXOR(cl::sycl::cl_double2)
+#undef SYCL_PXOR
+
+#define SYCL_PANDNOT(TYPE) \
+ template <> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pandnot<TYPE>(const TYPE &a, \
+ const TYPE &b) { \
+ return convert_vector<TYPE>(vector_as_int(a) & (~vector_as_int(b))); \
+ }
+SYCL_PANDNOT(cl::sycl::cl_float4)
+SYCL_PANDNOT(cl::sycl::cl_double2)
+#undef SYCL_PANDNOT
+
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void ptranspose(
+ PacketBlock<cl::sycl::cl_float4, 4>& kernel) {
+ float tmp = kernel.packet[0].y();
+ kernel.packet[0].y() = kernel.packet[1].x();
+ kernel.packet[1].x() = tmp;
+
+ tmp = kernel.packet[0].z();
+ kernel.packet[0].z() = kernel.packet[2].x();
+ kernel.packet[2].x() = tmp;
+
+ tmp = kernel.packet[0].w();
+ kernel.packet[0].w() = kernel.packet[3].x();
+ kernel.packet[3].x() = tmp;
+
+ tmp = kernel.packet[1].z();
+ kernel.packet[1].z() = kernel.packet[2].y();
+ kernel.packet[2].y() = tmp;
+
+ tmp = kernel.packet[1].w();
+ kernel.packet[1].w() = kernel.packet[3].y();
+ kernel.packet[3].y() = tmp;
+
+ tmp = kernel.packet[2].w();
+ kernel.packet[2].w() = kernel.packet[3].z();
+ kernel.packet[3].z() = tmp;
+}
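+
+// The six swaps above exchange the off-diagonal element pairs (i, j)/(j, i)
+// of the 4x4 block, i.e. an in-place transpose.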
+
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void ptranspose(
+ PacketBlock<cl::sycl::cl_double2, 2>& kernel) {
+ double tmp = kernel.packet[0].y();
+ kernel.packet[0].y() = kernel.packet[1].x();
+ kernel.packet[1].x() = tmp;
+}
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 pblend(
+ const Selector<unpacket_traits<cl::sycl::cl_float4>::size>& ifPacket,
+ const cl::sycl::cl_float4& thenPacket,
+ const cl::sycl::cl_float4& elsePacket) {
+ cl::sycl::cl_int4 condition(
+ ifPacket.select[0] ? 0 : -1, ifPacket.select[1] ? 0 : -1,
+ ifPacket.select[2] ? 0 : -1, ifPacket.select[3] ? 0 : -1);
+ return cl::sycl::select(thenPacket, elsePacket, condition);
+}
+
+template <>
+inline cl::sycl::cl_double2 pblend(
+ const Selector<unpacket_traits<cl::sycl::cl_double2>::size>& ifPacket,
+ const cl::sycl::cl_double2& thenPacket,
+ const cl::sycl::cl_double2& elsePacket) {
+ cl::sycl::cl_long2 condition(ifPacket.select[0] ? 0 : -1,
+ ifPacket.select[1] ? 0 : -1);
+ return cl::sycl::select(thenPacket, elsePacket, condition);
+}
+#endif // SYCL_DEVICE_ONLY
+
+#define SYCL_PSTORE(alignment) \
+ template <typename packet_type> \
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \
+ const Eigen::TensorSycl::internal::RangeAccess< \
+ cl::sycl::access::mode::read_write, \
+ typename unpacket_traits<packet_type>::type>& to, \
+ const packet_type& from) { \
+ pstore##alignment(to.get_pointer(), from); \
+ }
+
+// global space
+SYCL_PSTORE()
+SYCL_PSTORE(u)
+
+#undef SYCL_PSTORE
+
+template <typename scalar, typename packet_type, int Alignment>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(
+ Eigen::TensorSycl::internal::RangeAccess<
+ cl::sycl::access::mode::read_write,
+ typename unpacket_traits<packet_type>::type>
+ to,
+ const packet_type& from) {
+ pstoret<scalar, packet_type, Alignment>(to.get_pointer(), from);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_SYCL_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h
new file mode 100644
index 000000000..f81e59db5
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h
@@ -0,0 +1,694 @@
+/***************************************************************************
+ * Copyright (C) 2017 Codeplay Software Limited
+ * This Source Code Form is subject to the terms of the Mozilla
+ * Public License v. 2.0. If a copy of the MPL was not distributed
+ * with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ *
+ *
+ * SyclMemoryModel.h
+ *
+ * Description:
+ * Interface for SYCL buffers to behave as a non-dereferenceable pointer
+ * Interface for Placeholder accessor to behave as a pointer on both host
+ * and device
+ *
+ * Authors:
+ *
+ * Ruyman Reyes Codeplay Software Ltd.
+ * Mehdi Goli Codeplay Software Ltd.
+ * Vanya Yaneva Codeplay Software Ltd.
+ *
+ **************************************************************************/
+
+#if defined(EIGEN_USE_SYCL) && \
+ !defined(EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H)
+#define EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H
+
+#include <CL/sycl.hpp>
+#ifdef EIGEN_EXCEPTIONS
+#include <stdexcept>
+#endif
+#include <cstddef>
+#include <queue>
+#include <set>
+#include <unordered_map>
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+
+using sycl_acc_target = cl::sycl::access::target;
+using sycl_acc_mode = cl::sycl::access::mode;
+
+/**
+ * Default values for template arguments
+ */
+using buffer_data_type_t = uint8_t;
+const sycl_acc_target default_acc_target = sycl_acc_target::global_buffer;
+const sycl_acc_mode default_acc_mode = sycl_acc_mode::read_write;
+
+/**
+ * PointerMapper
+ * Associates fake pointers with buffers.
+ *
+ */
+class PointerMapper {
+ public:
+ using base_ptr_t = std::intptr_t;
+
+ /* Structure of a virtual pointer
+ *
+ * |================================================|
+ * | POINTER ADDRESS |
+ * |================================================|
+ */
+ struct virtual_pointer_t {
+ /* Type for the pointers
+ */
+ base_ptr_t m_contents;
+
+ /** Conversions from virtual_pointer_t to
+ * void * should just reinterpret_cast the integer number
+ */
+ operator void *() const { return reinterpret_cast<void *>(m_contents); }
+
+ /**
+ * Convert back to the integer number.
+ */
+ operator base_ptr_t() const { return m_contents; }
+
+ /**
+ * Add a certain value to the pointer to create a
+ * new pointer to that offset
+ */
+ virtual_pointer_t operator+(size_t off) { return m_contents + off; }
+
+ /* Numerical order for sorting pointers in containers. */
+ bool operator<(virtual_pointer_t rhs) const {
+ return (static_cast<base_ptr_t>(m_contents) <
+ static_cast<base_ptr_t>(rhs.m_contents));
+ }
+
+ bool operator>(virtual_pointer_t rhs) const {
+ return (static_cast<base_ptr_t>(m_contents) >
+ static_cast<base_ptr_t>(rhs.m_contents));
+ }
+
+ /**
+ * Equality comparison between pointers.
+ */
+ bool operator==(virtual_pointer_t rhs) const {
+ return (static_cast<base_ptr_t>(m_contents) ==
+ static_cast<base_ptr_t>(rhs.m_contents));
+ }
+
+ /**
+ * Simple forward to the equality overload.
+ */
+ bool operator!=(virtual_pointer_t rhs) const {
+ return !(this->operator==(rhs));
+ }
+
+ /**
+ * Converts a void * into a virtual pointer structure.
+ * Note that this will only work if the void * was
+ * already a virtual_pointer_t, but we have no way of
+ * checking
+ */
+ virtual_pointer_t(const void *ptr)
+ : m_contents(reinterpret_cast<base_ptr_t>(ptr)){};
+
+ /**
+ * Creates a virtual_pointer_t from the given integer
+ * number
+ */
+ virtual_pointer_t(base_ptr_t u) : m_contents(u){};
+ };
+
+ /* Definition of a null pointer
+ */
+ const virtual_pointer_t null_virtual_ptr = nullptr;
+
+ /**
+ * Whether a pointer is null or not.
+ * A pointer is null if its value equals null_virtual_ptr.
+ */
+ static inline bool is_nullptr(virtual_pointer_t ptr) {
+ return (static_cast<void *>(ptr) == nullptr);
+ }
+
+ /* basic type for all buffers
+ */
+ using buffer_t = cl::sycl::buffer_mem;
+
+ /**
+ * Node that stores information about a device allocation.
+ * Nodes are sorted by size to organise a free list of nodes
+ * that can be recovered.
+ */
+ struct pMapNode_t {
+ buffer_t m_buffer;
+ size_t m_size;
+ bool m_free;
+
+ pMapNode_t(buffer_t b, size_t size, bool f)
+ : m_buffer{b}, m_size{size}, m_free{f} {
+ m_buffer.set_final_data(nullptr);
+ }
+
+ bool operator<=(const pMapNode_t &rhs) { return (m_size <= rhs.m_size); }
+ };
+
+ /** Storage of the pointer / buffer tree
+ */
+ using pointerMap_t = std::map<virtual_pointer_t, pMapNode_t>;
+
+ /**
+ * Obtain the insertion point in the pointer map for
+ * a pointer of the given size.
+ * \param requiredSize Size attempted to reclaim
+ */
+ typename pointerMap_t::iterator get_insertion_point(size_t requiredSize) {
+ typename pointerMap_t::iterator retVal;
+ bool reuse = false;
+ if (!m_freeList.empty()) {
+ // try to re-use an existing block
+ for (auto freeElem : m_freeList) {
+ if (freeElem->second.m_size >= requiredSize) {
+ retVal = freeElem;
+ reuse = true;
+ // Element is not going to be free anymore
+ m_freeList.erase(freeElem);
+ break;
+ }
+ }
+ }
+ if (!reuse) {
+ retVal = std::prev(m_pointerMap.end());
+ }
+ return retVal;
+ }
+
+ /**
+ * Returns an iterator to the node that stores the information
+ * of the given virtual pointer from the given pointer map structure.
+ * If pointer is not found, throws std::out_of_range.
+ * If the pointer map structure is empty, throws std::out_of_range
+ *
+ * \param pMap the pointerMap_t structure storing all the pointers
+ * \param virtual_pointer_ptr The virtual pointer to obtain the node of
+ * \throws std::out_of_range if the pointer is not found or pMap is empty
+ */
+ typename pointerMap_t::iterator get_node(const virtual_pointer_t ptr) {
+ if (this->count() == 0) {
+ m_pointerMap.clear();
+ EIGEN_THROW_X(std::out_of_range("There are no pointers allocated\n"));
+
+ }
+ if (is_nullptr(ptr)) {
+ m_pointerMap.clear();
+ EIGEN_THROW_X(std::out_of_range("Cannot access null pointer\n"));
+ }
+ // The previous element to the lower bound is the node that
+ // holds this memory address
+ auto node = m_pointerMap.lower_bound(ptr);
+ // If the value of the pointer is not the one of the node
+ // then we return the previous one
+ if (node == std::end(m_pointerMap)) {
+ --node;
+ } else if (node->first != ptr) {
+ if (node == std::begin(m_pointerMap)) {
+ m_pointerMap.clear();
+ EIGEN_THROW_X(
+ std::out_of_range("The pointer is not registered in the map\n"));
+
+ }
+ --node;
+ }
+
+ return node;
+ }
+
+ /* get_buffer.
+ * Returns a buffer from the map using the pointer address
+ */
+ template <typename buffer_data_type = buffer_data_type_t>
+ cl::sycl::buffer<buffer_data_type, 1> get_buffer(
+ const virtual_pointer_t ptr) {
+ using sycl_buffer_t = cl::sycl::buffer<buffer_data_type, 1>;
+
+ // The node returned by get_node() stores a `buffer_mem`, so we need to
+ // cast it to a `buffer<>`. We can do this without the `buffer_mem` being
+ // a pointer, as we only declare member variables in the base class
+ // (`buffer_mem`) and not in the child class (`buffer<>`).
+ auto node = get_node(ptr);
+ eigen_assert(node->first == ptr || node->first < ptr);
+ eigen_assert(ptr < static_cast<virtual_pointer_t>(node->second.m_size +
+ node->first));
+ return *(static_cast<sycl_buffer_t *>(&node->second.m_buffer));
+ }
+
+ /**
+ * @brief Returns an accessor to the buffer of the given virtual pointer
+ * @param accessMode
+ * @param accessTarget
+ * @param ptr The virtual pointer
+ */
+ template <sycl_acc_mode access_mode = default_acc_mode,
+ sycl_acc_target access_target = default_acc_target,
+ typename buffer_data_type = buffer_data_type_t>
+ cl::sycl::accessor<buffer_data_type, 1, access_mode, access_target>
+ get_access(const virtual_pointer_t ptr) {
+ auto buf = get_buffer<buffer_data_type>(ptr);
+ return buf.template get_access<access_mode, access_target>();
+ }
+
+ /**
+ * @brief Returns an accessor to the buffer of the given virtual pointer
+ * in the given command group scope
+ * @param accessMode
+ * @param accessTarget
+ * @param ptr The virtual pointer
+ * @param cgh Reference to the command group scope
+ */
+ template <sycl_acc_mode access_mode = default_acc_mode,
+ sycl_acc_target access_target = default_acc_target,
+ typename buffer_data_type = buffer_data_type_t>
+ cl::sycl::accessor<buffer_data_type, 1, access_mode, access_target>
+ get_access(const virtual_pointer_t ptr, cl::sycl::handler &cgh) {
+ auto buf = get_buffer<buffer_data_type>(ptr);
+ return buf.template get_access<access_mode, access_target>(cgh);
+ }
+
+ /*
+ * Returns the offset from the base address of this pointer.
+ */
+ inline std::ptrdiff_t get_offset(const virtual_pointer_t ptr) {
+ // The previous element to the lower bound is the node that
+ // holds this memory address
+ auto node = get_node(ptr);
+ auto start = node->first;
+ eigen_assert(start == ptr || start < ptr);
+ eigen_assert(ptr < start + node->second.m_size);
+ return (ptr - start);
+ }
+
+ /*
+ * Returns the number of elements by which the given pointer is offset from
+ * the base address.
+ */
+ template <typename buffer_data_type>
+ inline size_t get_element_offset(const virtual_pointer_t ptr) {
+ return get_offset(ptr) / sizeof(buffer_data_type);
+ }
+
+ /**
+ * Constructs the PointerMapper structure.
+ */
+ PointerMapper(base_ptr_t baseAddress = 4096)
+ : m_pointerMap{}, m_freeList{}, m_baseAddress{baseAddress} {
+ if (m_baseAddress == 0) {
+ EIGEN_THROW_X(std::invalid_argument("Base address cannot be zero\n"));
+ }
+ };
+
+ /**
+ * PointerMapper cannot be copied or moved
+ */
+ PointerMapper(const PointerMapper &) = delete;
+
+ /**
+ * Empty the pointer list
+ */
+ inline void clear() {
+ m_freeList.clear();
+ m_pointerMap.clear();
+ }
+
+ /* add_pointer.
+ * Adds an existing pointer to the map and returns the virtual pointer id.
+ */
+ inline virtual_pointer_t add_pointer(const buffer_t &b) {
+ return add_pointer_impl(b);
+ }
+
+ /* add_pointer.
+ * Adds a pointer to the map and returns the virtual pointer id.
+ */
+ inline virtual_pointer_t add_pointer(buffer_t &&b) {
+ return add_pointer_impl(b);
+ }
+
+ /**
+ * @brief Fuses the given node with the following nodes in the
+ * pointer map if they are free
+ *
+ * @param node A reference to the free node to be fused
+ */
+ void fuse_forward(typename pointerMap_t::iterator &node) {
+ while (node != std::prev(m_pointerMap.end())) {
+ // if following node is free
+ // remove it and extend the current node with its size
+ auto fwd_node = std::next(node);
+ if (!fwd_node->second.m_free) {
+ break;
+ }
+ auto fwd_size = fwd_node->second.m_size;
+ m_freeList.erase(fwd_node);
+ m_pointerMap.erase(fwd_node);
+
+ node->second.m_size += fwd_size;
+ }
+ }
+
+ /**
+ * @brief Fuses the given node with the previous nodes in the
+ * pointer map if they are free
+ *
+ * @param node A reference to the free node to be fused
+ */
+ void fuse_backward(typename pointerMap_t::iterator &node) {
+ while (node != m_pointerMap.begin()) {
+ // if previous node is free, extend it
+ // with the size of the current one
+ auto prev_node = std::prev(node);
+ if (!prev_node->second.m_free) {
+ break;
+ }
+ prev_node->second.m_size += node->second.m_size;
+
+ // remove the current node
+ m_freeList.erase(node);
+ m_pointerMap.erase(node);
+
+ // point to the previous node
+ node = prev_node;
+ }
+ }
+
+ /* remove_pointer.
+ * Removes the given pointer from the map.
+ * The pointer is allowed to be reused only if ReUse is true.
+ */
+ template <bool ReUse = true>
+ void remove_pointer(const virtual_pointer_t ptr) {
+ if (is_nullptr(ptr)) {
+ return;
+ }
+ auto node = this->get_node(ptr);
+
+ node->second.m_free = true;
+ m_freeList.emplace(node);
+
+ // Fuse the node
+ // with free nodes before and after it
+ fuse_forward(node);
+ fuse_backward(node);
+
+ // If after fusing the node is the last one
+ // simply remove it (since it is free)
+ if (node == std::prev(m_pointerMap.end())) {
+ m_freeList.erase(node);
+ m_pointerMap.erase(node);
+ }
+ }
+
+ /* count.
+ * Return the number of active pointers (i.e., pointers that
+ * have been allocated but not yet freed).
+ */
+ size_t count() const { return (m_pointerMap.size() - m_freeList.size()); }
+
+ private:
+ /* add_pointer_impl.
+ * Adds a pointer to the map and returns the virtual pointer id.
+ * BufferT is either a const buffer_t& or a buffer_t&&.
+ */
+ template <class BufferT>
+ virtual_pointer_t add_pointer_impl(BufferT b) {
+ virtual_pointer_t retVal = nullptr;
+ size_t bufSize = b.get_count();
+ pMapNode_t p{b, bufSize, false};
+ // If this is the first pointer:
+ if (m_pointerMap.empty()) {
+ virtual_pointer_t initialVal{m_baseAddress};
+ m_pointerMap.emplace(initialVal, p);
+ return initialVal;
+ }
+
+ auto lastElemIter = get_insertion_point(bufSize);
+ // We are recovering an existing free node
+ if (lastElemIter->second.m_free) {
+ lastElemIter->second.m_buffer = b;
+ lastElemIter->second.m_free = false;
+
+ // If the recovered node is bigger than the inserted one
+ // add a new free node with the remaining space
+ if (lastElemIter->second.m_size > bufSize) {
+ // create a new node with the remaining space
+ auto remainingSize = lastElemIter->second.m_size - bufSize;
+ pMapNode_t p2{b, remainingSize, true};
+
+ // update size of the current node
+ lastElemIter->second.m_size = bufSize;
+
+ // add the new free node
+ auto newFreePtr = lastElemIter->first + bufSize;
+ auto freeNode = m_pointerMap.emplace(newFreePtr, p2).first;
+ m_freeList.emplace(freeNode);
+ }
+
+ retVal = lastElemIter->first;
+ } else {
+ size_t lastSize = lastElemIter->second.m_size;
+ retVal = lastElemIter->first + lastSize;
+ m_pointerMap.emplace(retVal, p);
+ }
+ return retVal;
+ }
+
+ /**
+ * Strict weak ordering for iterators to pointer map entries. Note that the
+ * expression below reduces to ordering by virtual address: one of the two
+ * size comparisons always holds, so (a->first < b->first) decides.
+ */
+ struct SortBySize {
+ bool operator()(typename pointerMap_t::iterator a,
+ typename pointerMap_t::iterator b) const {
+ return ((a->first < b->first) && (a->second <= b->second)) ||
+ ((a->first < b->first) && (b->second <= a->second));
+ }
+ };
+
+ /* Maps the pointer addresses to buffer and size pairs.
+ */
+ pointerMap_t m_pointerMap;
+
+ /* List of free nodes available for re-using
+ */
+ std::set<typename pointerMap_t::iterator, SortBySize> m_freeList;
+
+ /* Base address used when issuing the first virtual pointer, allows users
+ * to specify alignment. Cannot be zero. */
+ std::intptr_t m_baseAddress;
+};
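+
+// For illustration: with the default base address of 4096, the first
+// add_pointer() of a 100-byte buffer returns virtual pointer 4096 and, absent
+// free-list reuse, the next allocation starts at 4196; get_offset() recovers
+// a position inside a block by subtracting the owning node's key.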
+
+/* remove_pointer.
+ * Removes the given pointer from the map.
+ * The pointer is allowed to be reused only if ReUse is true.
+ */
+template <>
+inline void PointerMapper::remove_pointer<false>(const virtual_pointer_t ptr) {
+ if (is_nullptr(ptr)) {
+ return;
+ }
+ m_pointerMap.erase(this->get_node(ptr));
+}
+
+/**
+ * Malloc-like interface to the pointer-mapper.
+ * Given a size, creates a byte-typed buffer and returns a
+ * fake pointer to keep track of it.
+ * \param size Size in bytes of the desired allocation
+ * \throw cl::sycl::exception if error while creating the buffer
+ */
+inline void *SYCLmalloc(size_t size, PointerMapper &pMap) {
+ if (size == 0) {
+ return nullptr;
+ }
+ // Create a generic buffer of the given size
+ using buffer_t = cl::sycl::buffer<buffer_data_type_t, 1>;
+ auto thePointer = pMap.add_pointer(buffer_t(cl::sycl::range<1>{size}));
+ // Store the buffer on the global list
+ return static_cast<void *>(thePointer);
+}
+
+/**
+ * Free-like interface to the pointer mapper.
+ * Given a fake-pointer created with the virtual-pointer malloc,
+ * destroys the buffer and remove it from the list.
+ * If ReUse is false, the pointer is not added to the freeList;
+ * it should be false only for sub-buffers.
+ */
+template <bool ReUse = true, typename PointerMapper>
+inline void SYCLfree(void *ptr, PointerMapper &pMap) {
+ pMap.template remove_pointer<ReUse>(ptr);
+}
+
+/**
+ * Clear all the memory allocated by SYCL.
+ */
+template <typename PointerMapper>
+inline void SYCLfreeAll(PointerMapper &pMap) {
+ pMap.clear();
+}
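+
+// Minimal usage sketch (the variable names are hypothetical):
+//   PointerMapper mapper;
+//   void* p = SYCLmalloc(1024, mapper);  // fake pointer backed by a buffer
+//   auto acc = mapper.get_access(p);     // accessor over the whole buffer
+//   SYCLfree(p, mapper);                 // node returns to the free list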
+
+template <cl::sycl::access::mode AcMd, typename T>
+struct RangeAccess {
+ static const auto global_access = cl::sycl::access::target::global_buffer;
+ static const auto is_place_holder = cl::sycl::access::placeholder::true_t;
+ typedef T scalar_t;
+ typedef scalar_t &ref_t;
+ typedef typename cl::sycl::global_ptr<scalar_t>::pointer_t ptr_t;
+
+ // the accessor type is not necessarily the same as T
+ typedef cl::sycl::accessor<scalar_t, 1, AcMd, global_access, is_place_holder>
+ accessor;
+
+ typedef RangeAccess<AcMd, T> self_t;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RangeAccess(accessor access,
+ size_t offset,
+ std::intptr_t virtual_ptr)
+ : access_(access), offset_(offset), virtual_ptr_(virtual_ptr) {}
+
+ RangeAccess(cl::sycl::buffer<scalar_t, 1> buff =
+ cl::sycl::buffer<scalar_t, 1>(cl::sycl::range<1>(1)))
+ : access_{accessor{buff}}, offset_(0), virtual_ptr_(-1) {}
+
+ // This should only be used for the null constructor on the host side.
+ RangeAccess(std::nullptr_t) : RangeAccess() {}
+ // This template parameter must be removed and scalar_t should be replaced
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_t get_pointer() const {
+ return (access_.get_pointer().get() + offset_);
+ }
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator+=(Index offset) {
+ offset_ += (offset);
+ return *this;
+ }
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator+(Index offset) const {
+ return self_t(access_, offset_ + offset, virtual_ptr_);
+ }
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator-(Index offset) const {
+ return self_t(access_, offset_ - offset, virtual_ptr_);
+ }
+ template <typename Index>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator-=(Index offset) {
+ offset_ -= offset;
+ return *this;
+ }
+
+ // THIS IS FOR NULL COMPARISON ONLY
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator==(
+ const RangeAccess &lhs, std::nullptr_t) {
+ return ((lhs.virtual_ptr_ == -1));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator!=(
+ const RangeAccess &lhs, std::nullptr_t i) {
+ return !(lhs == i);
+ }
+
+ // THIS IS FOR NULL COMPARISON ONLY
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator==(
+ std::nullptr_t, const RangeAccess &rhs) {
+ return ((rhs.virtual_ptr_ == -1));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator!=(
+ std::nullptr_t i, const RangeAccess &rhs) {
+ return !(i == rhs);
+ }
+ // Prefix operator (Increment and return value)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator++() {
+ offset_++;
+ return (*this);
+ }
+
+ // Postfix operator (Return value and increment)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator++(int i) {
+ EIGEN_UNUSED_VARIABLE(i);
+ self_t temp_iterator(*this);
+ offset_++;
+ return temp_iterator;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t get_size() const {
+ return (access_.get_count() - offset_);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t get_offset() const {
+ return offset_;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_offset(std::ptrdiff_t offset) {
+ offset_ = offset;
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator*() const {
+ return *get_pointer();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator*() {
+ return *get_pointer();
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_t operator->() = delete;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator[](int x) {
+ return *(get_pointer() + x);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator[](int x) const {
+ return *(get_pointer() + x);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_t *get_virtual_pointer() const {
+ return reinterpret_cast<scalar_t *>(virtual_ptr_ +
+ (offset_ * sizeof(scalar_t)));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit operator bool() const {
+ return (virtual_ptr_ != -1);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator RangeAccess<AcMd, const T>() {
+ return RangeAccess<AcMd, const T>(access_, offset_, virtual_ptr_);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ operator RangeAccess<AcMd, const T>() const {
+ return RangeAccess<AcMd, const T>(access_, offset_, virtual_ptr_);
+ }
+ // binding placeholder accessors to a command group handler for SYCL
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(
+ cl::sycl::handler &cgh) const {
+ cgh.require(access_);
+ }
+
+ private:
+ accessor access_;
+ size_t offset_;
+ std::intptr_t virtual_ptr_; // the location of the buffer in the map
+};
+
+template <cl::sycl::access::mode AcMd, typename T>
+struct RangeAccess<AcMd, const T> : RangeAccess<AcMd, T> {
+ typedef RangeAccess<AcMd, T> Base;
+ using Base::Base;
+};
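+
+// Host-side sketch of the pointer-like behaviour above (the buffer size and
+// access mode are illustrative assumptions): arithmetic only moves the
+// offset, and bind() must be called from a command group before kernel use.
+inline void range_access_sketch(cl::sycl::handler &cgh) {
+  RangeAccess<cl::sycl::access::mode::read_write, float> ptr(
+      cl::sycl::buffer<float, 1>(cl::sycl::range<1>(16)));
+  ptr += 4;        // pointer-style arithmetic adjusts only the offset
+  ptr.bind(cgh);   // registers the placeholder accessor with the handler
+}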
+
+} // namespace internal
+} // namespace TensorSycl
+} // namespace Eigen
+
+#endif // EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/TypeCasting.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/TypeCasting.h
new file mode 100644
index 000000000..9208ab21d
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/SYCL/TypeCasting.h
@@ -0,0 +1,85 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TypeCasting.h
+ *
+ * \brief:
+ * TypeCasting
+ *
+ *****************************************************************/
+
+#ifndef EIGEN_TYPE_CASTING_SYCL_H
+#define EIGEN_TYPE_CASTING_SYCL_H
+
+namespace Eigen {
+
+namespace internal {
+#ifdef SYCL_DEVICE_ONLY
+template <>
+struct type_casting_traits<float, int> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_int4
+pcast<cl::sycl::cl_float4, cl::sycl::cl_int4>(const cl::sycl::cl_float4& a) {
+ return a
+ .template convert<cl::sycl::cl_int, cl::sycl::rounding_mode::automatic>();
+}
+
+template <>
+struct type_casting_traits<int, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
+pcast<cl::sycl::cl_int4, cl::sycl::cl_float4>(const cl::sycl::cl_int4& a) {
+ return a.template convert<cl::sycl::cl_float,
+ cl::sycl::rounding_mode::automatic>();
+}
+
+template <>
+struct type_casting_traits<double, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
+pcast<cl::sycl::cl_double2, cl::sycl::cl_float4>(
+ const cl::sycl::cl_double2& a, const cl::sycl::cl_double2& b) {
+ auto a1 = a.template convert<cl::sycl::cl_float,
+ cl::sycl::rounding_mode::automatic>();
+ auto b1 = b.template convert<cl::sycl::cl_float,
+ cl::sycl::rounding_mode::automatic>();
+ return cl::sycl::float4(a1.x(), a1.y(), b1.x(), b1.y());
+}
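+
+// Scalar analogue of the 2:1 ratio above (illustrative sketch, not part of
+// Eigen): SrcCoeffRatio = 2 means two source packets are consumed per
+// destination packet, which is why this double->float pcast takes two
+// arguments.
+inline void cast_ratio_sketch(const double a[2], const double b[2],
+                              float out[4]) {
+  out[0] = static_cast<float>(a[0]);
+  out[1] = static_cast<float>(a[1]);
+  out[2] = static_cast<float>(b[0]);
+  out[3] = static_cast<float>(b[1]);
+}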
+
+template <>
+struct type_casting_traits<float, double> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+};
+
+template <>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2
+pcast<cl::sycl::cl_float4, cl::sycl::cl_double2>(const cl::sycl::cl_float4& a) {
+ // Simply discard the second half of the input
+ return cl::sycl::cl_double2(a.x(), a.y());
+}
+
+#endif
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TYPE_CASTING_SYCL_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/Complex.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/Complex.h
index 1bfb73397..6c67cfe05 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/Complex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/Complex.h
@@ -15,6 +15,10 @@ namespace Eigen {
namespace internal {
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+static Packet4ui p4ui_CONJ_XOR = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; //vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);
+#endif
+
static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8);//{ 0x8000000000000000, 0x0000000000000000 };
@@ -29,10 +33,14 @@ struct Packet2cf
{
EIGEN_STRONG_INLINE Packet2cf() {}
EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
union {
Packet4f v;
Packet1cd cd[2];
};
+#else
+ Packet4f v;
+#endif
};
template<> struct packet_traits<std::complex<float> > : default_packet_traits
@@ -83,69 +91,43 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet2cf> {
+ typedef std::complex<float> type;
+ enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+ typedef Packet2cf half;
+ typedef Packet4f as_real;
+};
+template<> struct unpacket_traits<Packet1cd> {
+ typedef std::complex<double> type;
+ enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+ typedef Packet1cd half;
+ typedef Packet2d as_real;
+};
/* Forward declaration */
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel);
-template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+/* complex<double> first */
template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
-template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
-{
- Packet2cf res;
- res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
- res.cd[1] = res.cd[0];
- return res;
-}
-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
-{
- std::complex<float> EIGEN_ALIGN16 af[2];
- af[0] = from[0*stride];
- af[1] = from[1*stride];
- return pload<Packet2cf>(af);
-}
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride EIGEN_UNUSED)
{
return pload<Packet1cd>(from);
}
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
-{
- std::complex<float> EIGEN_ALIGN16 af[2];
- pstore<std::complex<float> >((std::complex<float> *) af, from);
- to[0*stride] = af[0];
- to[1*stride] = af[1];
-}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride EIGEN_UNUSED)
{
pstore<std::complex<double> >(to, from);
}
-
-template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v, b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }
-template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v, b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }
template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
-template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); }
template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
-{
- Packet2cf res;
- res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;
- res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;
- return res;
-}
-
template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
Packet2d a_re, a_im, v1, v2;
@@ -163,190 +145,177 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
return Packet1cd(v1 + v2);
}
-template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
- Packet2cf res;
- res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;
- res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;
- return res;
-}
-
-template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }
-template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v,b.v)); }
-
+template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }
template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) {
+ Packet2d eq = vec_cmpeq (a.v, b.v);
+ Packet2d tmp = { eq[1], eq[0] };
+ return (Packet1cd)pand<Packet2d>(eq, tmp);
+}
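+
+// Scalar sketch of the swap-and-AND trick above: a complex lane is equal
+// only when both its real and imaginary halves match, so the lanewise
+// comparison is ANDed with its swapped copy.
+inline void cmp_eq_complex_sketch(const double a[2], const double b[2],
+                                  unsigned long long out[2]) {
+  unsigned long long eq[2] = { a[0] == b[0] ? ~0ull : 0ull,
+                               a[1] == b[1] ? ~0ull : 0ull };
+  unsigned long long tmp[2] = { eq[1], eq[0] };  // swap the two lanes
+  out[0] = eq[0] & tmp[0];  // all-ones only if real AND imag matched
+  out[1] = eq[1] & tmp[1];
+}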
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
{
- std::complex<double> EIGEN_ALIGN16 res;
+ EIGEN_ALIGN16 std::complex<double> res;
pstore<std::complex<double> >(&res, a);
return res;
}
-template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
-{
- std::complex<float> EIGEN_ALIGN16 res[2];
- pstore<std::complex<float> >(res, a);
-
- return res[0];
-}
template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
-template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
{
- Packet2cf res;
- res.cd[0] = a.cd[1];
- res.cd[1] = a.cd[0];
- return res;
+ return pfirst(a);
}
-
-template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
{
return pfirst(a);
}
-template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+
+template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
{
- std::complex<float> res;
- Packet1cd b = padd<Packet1cd>(a.cd[0], a.cd[1]);
- vec_st2f(b.v, (float*)&res);
- return res;
+ // TODO optimize it for AltiVec
+ Packet1cd res = pmul(a,pconj(b));
+ Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_);
+ return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64)));
}
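+
+// Scalar sketch of the identity used by pdiv above:
+// a / b = a * conj(b) / (re(b)^2 + im(b)^2); the vec_perm over s builds the
+// |b|^2 denominator broadcast to both lanes.
+inline std::complex<double> cdiv_sketch(std::complex<double> a,
+                                        std::complex<double> b) {
+  const std::complex<double> num = a * std::conj(b);
+  const double denom = b.real() * b.real() + b.imag() * b.imag();
+  return std::complex<double>(num.real() / denom, num.imag() / denom);
+}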
-template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)
+EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
{
- return vecs[0];
+ return Packet1cd(preverse(Packet2d(x.v)));
}
-template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
{
- PacketBlock<Packet2cf,2> transpose;
- transpose.packet[0] = vecs[0];
- transpose.packet[1] = vecs[1];
- ptranspose(transpose);
+ Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
+ kernel.packet[0].v = tmp;
+}
- return padd<Packet2cf>(transpose.packet[0], transpose.packet[1]);
-}
+/* complex<float> follows */
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
-template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
- return pfirst(a);
+ EIGEN_ALIGN16 std::complex<float> res[2];
+ pstore<std::complex<float> >(res, a);
+
+ return res[0];
}
-template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+
+
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
- std::complex<float> res;
- Packet1cd b = pmul<Packet1cd>(a.cd[0], a.cd[1]);
- vec_st2f(b.v, (float*)&res);
+ Packet2cf res;
+ res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
+ res.cd[1] = res.cd[0];
return res;
}
-
-template<int Offset>
-struct palign_impl<Offset,Packet1cd>
+#else
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
- static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
- {
- // FIXME is it sure we never have to align a Packet1cd?
- // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
- }
-};
+ Packet2cf res;
+ if((std::ptrdiff_t(&from) % 16) == 0)
+ res.v = pload<Packet4f>((const float *)&from);
+ else
+ res.v = ploadu<Packet4f>((const float *)&from);
+ res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
+ return res;
+}
+#endif
-template<int Offset>
-struct palign_impl<Offset,Packet2cf>
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
{
- static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
- {
- if (Offset == 1) {
- first.cd[0] = first.cd[1];
- first.cd[1] = second.cd[0];
- }
- }
-};
-
-template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
+ EIGEN_ALIGN16 std::complex<float> af[2];
+ af[0] = from[0*stride];
+ af[1] = from[1*stride];
+ return pload<Packet2cf>(af);
+}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
+ EIGEN_ALIGN16 std::complex<float> af[2];
+ pstore<std::complex<float> >((std::complex<float> *) af, from);
+ to[0*stride] = af[0];
+ to[1*stride] = af[1];
+}
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v, b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); }
-template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
+template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v,b.v)); }
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
-template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
-{
- EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
- { return padd(pmul(x,y),c); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
- EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
-template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
-{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return internal::pmul(a, pconj(b));
- }
-};
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) {
+ Packet4f eq = pcmp_eq<Packet4f> (a.v, b.v);
+ Packet2cf res;
+ Packet2d tmp1 = { eq.v4f[0][1], eq.v4f[0][0] };
+ Packet2d tmp2 = { eq.v4f[1][1], eq.v4f[1][0] };
+ res.v.v4f[0] = pand<Packet2d>(eq.v4f[0], tmp1);
+ res.v.v4f[1] = pand<Packet2d>(eq.v4f[1], tmp2);
+ return res;
+}
-template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
-
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return internal::pmul(pconj(a), b);
- }
-};
+ Packet2cf res;
+ res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;
+ res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;
+ return res;
+}
-template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
- EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
- { return padd(pmul(x,y),c); }
+ Packet2cf res;
+ res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;
+ res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;
+ return res;
+}
- EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
- {
- return pconj(internal::pmul(a, b));
- }
-};
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+ Packet2cf res;
+ res.cd[0] = a.cd[1];
+ res.cd[1] = a.cd[0];
+ return res;
+}
-EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
-EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+ std::complex<float> res;
+ Packet1cd b = padd<Packet1cd>(a.cd[0], a.cd[1]);
+ vec_st2f(b.v, (float*)&res);
+ return res;
+}
-template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
{
- // TODO optimize it for AltiVec
- Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
- Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_);
- return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64)));
+ std::complex<float> res;
+ Packet1cd b = pmul<Packet1cd>(a.cd[0], a.cd[1]);
+ vec_st2f(b.v, (float*)&res);
+ return res;
}
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
// TODO optimize it for AltiVec
@@ -356,11 +325,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, con
return res;
}
-EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
-{
- return Packet1cd(preverse(Packet2d(x.v)));
-}
-
EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
{
Packet2cf res;
@@ -369,13 +333,6 @@ EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
return res;
}
-EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
-{
- Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
- kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
- kernel.packet[0].v = tmp;
-}
-
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
{
Packet1cd tmp = kernel.packet[0].cd[1];
@@ -389,6 +346,88 @@ template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, con
result.v = pblend<Packet4f>(ifPacket4, thenPacket.v, elsePacket.v);
return result;
}
+#else
+template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) {
+ Packet4f eq = vec_cmpeq (a.v, b.v);
+ Packet4f tmp = { eq[1], eq[0], eq[3], eq[2] };
+ return (Packet2cf)pand<Packet4f>(eq, tmp);
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ Packet4f a_re, a_im, prod, prod_im;
+
+  // Broadcast the real parts of a across each complex lane
+ a_re = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
+
+ // Get the imaginary parts of a
+ a_im = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
+
+ // multiply a_im * b and get the conjugate result
+ prod_im = a_im * b.v;
+ prod_im = pxor<Packet4f>(prod_im, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR));
+ // permute back to a proper order
+ prod_im = vec_perm(prod_im, prod_im, p16uc_COMPLEX32_REV);
+
+ // multiply a_re * b, add prod_im
+ prod = pmadd<Packet4f>(a_re, b.v, prod_im);
+
+ return Packet2cf(prod);
+}
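+
+// One-lane scalar sketch of the permute/multiply scheme above: prod_im ends
+// up as (-ai*bi, ai*br) after the conjugate XOR and the pair-wise reversal,
+// and adding a_re * b = (ar*br, ar*bi) gives the complex product
+// (ar*br - ai*bi, ar*bi + ai*br).
+inline void cmul_lane_sketch(const float a[2], const float b[2],
+                             float out[2]) {
+  const float a_re = a[0], a_im = a[1];
+  float prod_im[2] = { a_im * b[0], a_im * b[1] };  // (ai*br, ai*bi)
+  prod_im[1] = -prod_im[1];                         // conjugate XOR
+  const float rev[2] = { prod_im[1], prod_im[0] };  // COMPLEX32_REV swap
+  out[0] = a_re * b[0] + rev[0];                    // ar*br - ai*bi
+  out[1] = a_re * b[1] + rev[1];                    // ar*bi + ai*br
+}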
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+ Packet4f rev_a;
+ rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
+ return Packet2cf(rev_a);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+ Packet4f b;
+ b = vec_sld(a.v, a.v, 8);
+ b = padd<Packet4f>(a.v, b);
+ return pfirst<Packet2cf>(Packet2cf(b));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+ Packet4f b;
+ Packet2cf prod;
+ b = vec_sld(a.v, a.v, 8);
+ prod = pmul<Packet2cf>(a, Packet2cf(b));
+
+ return pfirst<Packet2cf>(prod);
+}
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ // TODO optimize it for AltiVec
+ Packet2cf res = pmul(a, pconj(b));
+ Packet4f s = pmul<Packet4f>(b.v, b.v);
+ return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
+{
+ return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
+}
+
+EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
+{
+ Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
+ kernel.packet[0].v = tmp;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
+ Packet2cf result;
+ result.v = reinterpret_cast<Packet4f>(pblend<Packet2d>(ifPacket, reinterpret_cast<Packet2d>(thenPacket.v), reinterpret_cast<Packet2d>(elsePacket.v)));
+ return result;
+}
+#endif
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/MathFunctions.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/MathFunctions.h
index 5c7aa7256..1635e128c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/MathFunctions.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/MathFunctions.h
@@ -20,6 +20,50 @@ namespace Eigen {
namespace internal {
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+static _EIGEN_DECLARE_CONST_Packet4i(23, 23);
+
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+/* the smallest non-denormalized float number */
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f
+static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff);
+
+/* natural logarithm computed for 4 simultaneous floats;
+   returns NaN for x <= 0
+*/
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
+static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+#endif
+
static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
@@ -93,43 +137,95 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& x)
+Packet4f pexp<Packet4f>(const Packet4f& _x)
{
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ Packet4f x = _x;
+
+ Packet4f tmp, fx;
+ Packet4i emm0;
+
+ // clamp x
+ x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+
+ // express exp(x) as exp(g + n*log(2))
+ fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+
+ fx = pfloor(fx);
+
+ tmp = pmul(fx, p4f_cephes_exp_C1);
+ Packet4f z = pmul(fx, p4f_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ z = pmul(x,x);
+
+ Packet4f y = p4f_cephes_exp_p0;
+ y = pmadd(y, x, p4f_cephes_exp_p1);
+ y = pmadd(y, x, p4f_cephes_exp_p2);
+ y = pmadd(y, x, p4f_cephes_exp_p3);
+ y = pmadd(y, x, p4f_cephes_exp_p4);
+ y = pmadd(y, x, p4f_cephes_exp_p5);
+ y = pmadd(y, z, x);
+ y = padd(y, p4f_1);
+
+ // build 2^n
+ emm0 = (Packet4i){ (int)fx[0], (int)fx[1], (int)fx[2], (int)fx[3] };
+ emm0 = emm0 + p4i_0x7f;
+ emm0 = emm0 << reinterpret_cast<Packet4i>(p4i_23);
+
+ return pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x);
+#else
Packet4f res;
- res.v4f[0] = pexp<Packet2d>(x.v4f[0]);
- res.v4f[1] = pexp<Packet2d>(x.v4f[1]);
+ res.v4f[0] = pexp<Packet2d>(_x.v4f[0]);
+ res.v4f[1] = pexp<Packet2d>(_x.v4f[1]);
return res;
+#endif
}
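+
+// Standalone scalar sketch of the Cephes-style path above (constants copied
+// from the p4f_cephes_* declarations earlier in this file; requires <cmath>,
+// <cstring>, <cstdint> and <algorithm>). The final max() with the original
+// input mirrors the vector code's last step.
+inline float cephes_expf_sketch(float x) {
+  const float in = x;
+  x = std::min(std::max(x, -88.3762626647949f), 88.3762626647950f); // clamp
+  float fx = std::floor(x * 1.44269504088896341f + 0.5f); // n = round(x/ln2)
+  x -= fx * 0.693359375f;                                 // Cody-Waite hi
+  x -= fx * -2.12194440e-4f;                              // Cody-Waite lo
+  const float z = x * x;
+  float y = 1.9875691500E-4f;                             // Horner polynomial
+  y = y * x + 1.3981999507E-3f;
+  y = y * x + 8.3334519073E-3f;
+  y = y * x + 4.1665795894E-2f;
+  y = y * x + 1.6666665459E-1f;
+  y = y * x + 5.0000001201E-1f;
+  y = y * z + x + 1.0f;
+  // build 2^n by writing n + 127 into the float exponent field
+  const std::uint32_t bits =
+      static_cast<std::uint32_t>(static_cast<int>(fx) + 0x7f) << 23;
+  float pow2n;
+  std::memcpy(&pow2n, &bits, sizeof(pow2n));
+  return std::max(y * pow2n, in);
+}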
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d psqrt<Packet2d>(const Packet2d& x)
{
- return __builtin_s390_vfsqdb(x);
+ return vec_sqrt(x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& x)
{
Packet4f res;
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ res = vec_sqrt(x);
+#else
res.v4f[0] = psqrt<Packet2d>(x.v4f[0]);
res.v4f[1] = psqrt<Packet2d>(x.v4f[1]);
+#endif
return res;
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d prsqrt<Packet2d>(const Packet2d& x) {
- // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation.
return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f prsqrt<Packet4f>(const Packet4f& x) {
Packet4f res;
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ res = pset1<Packet4f>(1.0) / psqrt<Packet4f>(x);
+#else
res.v4f[0] = prsqrt<Packet2d>(x.v4f[0]);
res.v4f[1] = prsqrt<Packet2d>(x.v4f[1]);
+#endif
return res;
}
+// Hyperbolic Tangent function.
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+ptanh<Packet4f>(const Packet4f& x) {
+ return internal::generic_fast_tanh_float(x);
+}
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/PacketMath.h b/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/PacketMath.h
index 57b01fc63..a7b59c80e 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/PacketMath.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/arch/ZVector/PacketMath.h
@@ -10,26 +10,20 @@
#ifndef EIGEN_PACKET_MATH_ZVECTOR_H
#define EIGEN_PACKET_MATH_ZVECTOR_H
-#include <stdint.h>
-
namespace Eigen {
namespace internal {
#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
#endif
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
-#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
-#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
-#endif
-
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
-#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#endif
typedef __vector int Packet4i;
@@ -41,21 +35,30 @@ typedef __vector double Packet2d;
typedef __vector unsigned long long Packet2ul;
typedef __vector long long Packet2l;
+// Z14 has builtin support for float vectors
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+typedef __vector float Packet4f;
+#else
typedef struct {
Packet2d v4f[2];
} Packet4f;
+#endif
typedef union {
- int32_t i[4];
- uint32_t ui[4];
- int64_t l[2];
- uint64_t ul[2];
+ numext::int32_t i[4];
+ numext::uint32_t ui[4];
+ numext::int64_t l[2];
+ numext::uint64_t ul[2];
double d[2];
+ float f[4];
Packet4i v4i;
Packet4ui v4ui;
Packet2l v2l;
Packet2ul v2ul;
Packet2d v2d;
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ Packet4f v4f;
+#endif
} Packet;
// We don't want to write the same code all the time, but we need to reuse the constants
@@ -80,15 +83,31 @@ typedef union {
Packet2l p2l_##NAME = pset1<Packet2l>(X)
// These constants are endian-agnostic
-//static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE, 1); //{ 1, 1, 1, 1}
static _EIGEN_DECLARE_CONST_FAST_Packet2d(ZERO, 0);
static _EIGEN_DECLARE_CONST_FAST_Packet2l(ZERO, 0);
static _EIGEN_DECLARE_CONST_FAST_Packet2l(ONE, 1);
-static Packet2d p2d_ONE = { 1.0, 1.0 };
-static Packet2d p2d_ZERO_ = { -0.0, -0.0 };
+static Packet2d p2d_ONE = { 1.0, 1.0 };
+static Packet2d p2d_ZERO_ = { numext::bit_cast<double>(0x8000000000000000ull),
+ numext::bit_cast<double>(0x8000000000000000ull) };
+
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
+ Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(vec_splat_s32(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+ Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+ const Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(pset1<Packet4i>(X))
+
+static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0}
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1}
+static Packet4f p4f_MZERO = { 0x80000000, 0x80000000, 0x80000000, 0x80000000};
+#endif
static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };
@@ -120,9 +139,9 @@ static Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0
static Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
static Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};
-//static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
+static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
-//static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
@@ -149,29 +168,31 @@ template<> struct packet_traits<int> : default_packet_traits
};
};
-template<> struct packet_traits<float> : default_packet_traits
-{
+template <>
+struct packet_traits<float> : default_packet_traits {
typedef Packet4f type;
typedef Packet4f half;
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
- size=4,
+ size = 4,
HasHalfPacket = 0,
- HasAdd = 1,
- HasSub = 1,
- HasMul = 1,
- HasDiv = 1,
- HasMin = 1,
- HasMax = 1,
- HasAbs = 1,
- HasSin = 0,
- HasCos = 0,
- HasLog = 0,
- HasExp = 1,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasAbs = 1,
+ HasSin = 0,
+ HasCos = 0,
+ HasLog = 0,
+ HasExp = 1,
HasSqrt = 1,
HasRsqrt = 1,
+ HasTanh = 1,
+ HasErf = 1,
HasRound = 1,
HasFloor = 1,
HasCeil = 1,
@@ -211,9 +232,9 @@ template<> struct packet_traits<double> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4f half; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2d half; };
/* Forward declaration */
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f,4>& kernel);
@@ -258,82 +279,15 @@ inline std::ostream & operator <<(std::ostream & s, const Packet2d & v)
return s;
}
-/* Helper function to simulate a vec_splat_packet4f
- */
-template<int element> EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from)
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
{
- Packet4f splat;
- switch (element) {
- case 0:
- splat.v4f[0] = vec_splat(from.v4f[0], 0);
- splat.v4f[1] = splat.v4f[0];
- break;
- case 1:
- splat.v4f[0] = vec_splat(from.v4f[0], 1);
- splat.v4f[1] = splat.v4f[0];
- break;
- case 2:
- splat.v4f[0] = vec_splat(from.v4f[1], 0);
- splat.v4f[1] = splat.v4f[0];
- break;
- case 3:
- splat.v4f[0] = vec_splat(from.v4f[1], 1);
- splat.v4f[1] = splat.v4f[0];
- break;
- }
- return splat;
+ Packet vt;
+ vt.v4f = v;
+ s << vt.f[0] << ", " << vt.f[1] << ", " << vt.f[2] << ", " << vt.f[3];
+ return s;
}
-
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
- static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
- {
- switch (Offset % 4) {
- case 1:
- first = vec_sld(first, second, 4); break;
- case 2:
- first = vec_sld(first, second, 8); break;
- case 3:
- first = vec_sld(first, second, 12); break;
- }
- }
-};
-
-/* This is a tricky one, we have to translate float alignment to vector elements of sizeof double
- */
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
- static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
- {
- switch (Offset % 4) {
- case 1:
- first.v4f[0] = vec_sld(first.v4f[0], first.v4f[1], 8);
- first.v4f[1] = vec_sld(first.v4f[1], second.v4f[0], 8);
- break;
- case 2:
- first.v4f[0] = first.v4f[1];
- first.v4f[1] = second.v4f[0];
- break;
- case 3:
- first.v4f[0] = vec_sld(first.v4f[1], second.v4f[0], 8);
- first.v4f[1] = vec_sld(second.v4f[0], second.v4f[1], 8);
- break;
- }
- }
-};
-
-
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
- static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
- {
- if (Offset == 1)
- first = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(first), reinterpret_cast<Packet4i>(second), 8));
- }
-};
+#endif
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)
{
@@ -344,16 +298,6 @@ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)
return vfrom->v4i;
}
-template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
-{
- // FIXME: No intrinsic yet
- EIGEN_DEBUG_ALIGNED_LOAD
- Packet4f vfrom;
- vfrom.v4f[0] = vec_ld2f(&from[0]);
- vfrom.v4f[1] = vec_ld2f(&from[2]);
- return vfrom;
-}
-
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
{
// FIXME: No intrinsic yet
@@ -372,15 +316,6 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
vto->v4i = from;
}
-template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
-{
- // FIXME: No intrinsic yet
- EIGEN_DEBUG_ALIGNED_STORE
- vec_st2f(from.v4f[0], &to[0]);
- vec_st2f(from.v4f[1], &to[2]);
-}
-
-
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
{
// FIXME: No intrinsic yet
@@ -397,13 +332,6 @@ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from)
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
return vec_splats(from);
}
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
-{
- Packet4f to;
- to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
- to.v4f[1] = to.v4f[0];
- return to;
-}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4i>(const int *a,
@@ -417,17 +345,6 @@ pbroadcast4<Packet4i>(const int *a,
}
template<> EIGEN_STRONG_INLINE void
-pbroadcast4<Packet4f>(const float *a,
- Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
-{
- a3 = pload<Packet4f>(a);
- a0 = vec_splat_packet4f<0>(a3);
- a1 = vec_splat_packet4f<1>(a3);
- a2 = vec_splat_packet4f<2>(a3);
- a3 = vec_splat_packet4f<3>(a3);
-}
-
-template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
@@ -441,7 +358,7 @@ pbroadcast4<Packet2d>(const double *a,
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
- int EIGEN_ALIGN16 ai[4];
+ EIGEN_ALIGN16 int ai[4];
ai[0] = from[0*stride];
ai[1] = from[1*stride];
ai[2] = from[2*stride];
@@ -449,19 +366,9 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
return pload<Packet4i>(ai);
}
-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
-{
- float EIGEN_ALIGN16 ai[4];
- ai[0] = from[0*stride];
- ai[1] = from[1*stride];
- ai[2] = from[2*stride];
- ai[3] = from[3*stride];
- return pload<Packet4f>(ai);
-}
-
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
- double EIGEN_ALIGN16 af[2];
+ EIGEN_ALIGN16 double af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet2d>(af);
@@ -469,7 +376,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const dou
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
- int EIGEN_ALIGN16 ai[4];
+ EIGEN_ALIGN16 int ai[4];
pstore<int>((int *)ai, from);
to[0*stride] = ai[0];
to[1*stride] = ai[1];
@@ -477,9 +384,272 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const
to[3*stride] = ai[3];
}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
+{
+ EIGEN_ALIGN16 double af[2];
+ pstore<double>(af, from);
+ to[0*stride] = af[0];
+ to[1*stride] = af[1];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a + b); }
+template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a + b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a - b); }
+template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a - b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a * b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a * b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a / b); }
+template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a / b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return (-a); }
+template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return (-a); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Packet4i>(a, b), c); }
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
+
+template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }
+template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return pand<Packet4i>(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return vec_ceil(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { return pload<Packet4i>(from); }
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { return pload<Packet2d>(from); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+{
+ Packet4i p = pload<Packet4i>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
+{
+ Packet2d p = pload<Packet2d>(from);
+ return vec_perm(p, p, p16uc_PSET64_HI);
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { pstore<int>(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { pstore<double>(to, from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { EIGEN_ALIGN16 int x[4]; pstore(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { EIGEN_ALIGN16 double x[2]; pstore(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{
+ return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{
+ return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pabs<Packet4i>(const Packet4i& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet2d pabs<Packet2d>(const Packet2d& a) { return vec_abs(a); }
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+ Packet4i b, sum;
+ b = vec_sld(a, a, 8);
+ sum = padd<Packet4i>(a, b);
+ b = vec_sld(sum, sum, 4);
+ sum = padd<Packet4i>(sum, b);
+ return pfirst(sum);
+}
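+
+// Scalar sketch of the shift-and-add reduction above: each vec_sld rotates
+// the packet by half the remaining width, so log2(4) = 2 adds sum all lanes.
+inline int predux4_sketch(const int a[4]) {
+  const int b[4] = { a[2], a[3], a[0], a[1] };  // vec_sld(a, a, 8)
+  const int s[4] = { a[0]+b[0], a[1]+b[1], a[2]+b[2], a[3]+b[3] };
+  const int c0 = s[1];                          // vec_sld(s, s, 4), lane 0
+  return s[0] + c0;                             // total ends up in lane 0
+}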
+
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{
+ Packet2d b, sum;
+ b = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8));
+ sum = padd<Packet2d>(a, b);
+ return pfirst(sum);
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+ EIGEN_ALIGN16 int aux[4];
+ pstore(aux, a);
+ return aux[0] * aux[1] * aux[2] * aux[3];
+}
+
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{
+ return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+}
+
+// min
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+ Packet4i b, res;
+ b = pmin<Packet4i>(a, vec_sld(a, a, 8));
+ res = pmin<Packet4i>(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{
+ return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+}
+
+// max
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+ Packet4i b, res;
+ b = pmax<Packet4i>(a, vec_sld(a, a, 8));
+ res = pmax<Packet4i>(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{
+ return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4i,4>& kernel) {
+ Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+ Packet4i t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+ Packet4i t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+ Packet4i t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
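+
+// Scalar sketch of the merge-based transpose above: vec_mergeh/vec_mergel
+// interleave the high/low halves of two packets, and two interleaving
+// rounds turn rows into columns.
+inline void transpose4_sketch(const int r[4][4], int out[4][4]) {
+  const int t0[4] = { r[0][0], r[2][0], r[0][1], r[2][1] };  // mergeh(r0,r2)
+  const int t1[4] = { r[0][2], r[2][2], r[0][3], r[2][3] };  // mergel(r0,r2)
+  const int t2[4] = { r[1][0], r[3][0], r[1][1], r[3][1] };  // mergeh(r1,r3)
+  const int t3[4] = { r[1][2], r[3][2], r[1][3], r[3][3] };  // mergel(r1,r3)
+  const int rows[4][4] = {
+    { t0[0], t2[0], t0[1], t2[1] },  // mergeh(t0,t2) -> column 0 of r
+    { t0[2], t2[2], t0[3], t2[3] },  // mergel(t0,t2) -> column 1
+    { t1[0], t3[0], t1[1], t3[1] },  // mergeh(t1,t3) -> column 2
+    { t1[2], t3[2], t1[3], t3[3] },  // mergel(t1,t3) -> column 3
+  };
+  for (int i = 0; i < 4; ++i)
+    for (int j = 0; j < 4; ++j) out[i][j] = rows[i][j];
+}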
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet2d,2>& kernel) {
+ Packet2d t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI);
+ Packet2d t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO);
+ kernel.packet[0] = t0;
+ kernel.packet[1] = t1;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+ Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
+ return vec_sel(elsePacket, thenPacket, mask);
+}
+
+
+template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
+ Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+ Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
+ return vec_sel(elsePacket, thenPacket, mask);
+}
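+
+// Scalar sketch of pblend above: comparing the selector against ONE turns
+// 1/0 selectors into all-ones/all-zeros masks, and vec_sel then picks the
+// then-packet wherever the mask is set.
+inline void pblend2_sketch(const unsigned long long select[2],
+                           const double thenP[2], const double elseP[2],
+                           double out[2]) {
+  for (int i = 0; i < 2; ++i)
+    out[i] = (select[i] == 1ull) ? thenP[i] : elseP[i];  // vec_sel per lane
+}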
+
+/* z13 has no vector float support, so we emulate it with double;
+   z14 has proper vector float support.
+*/
+#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+/* Helper function to simulate a vec_splat_packet4f
+ */
+template<int element> EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from)
+{
+ Packet4f splat;
+ switch (element) {
+ case 0:
+ splat.v4f[0] = vec_splat(from.v4f[0], 0);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ case 1:
+ splat.v4f[0] = vec_splat(from.v4f[0], 1);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ case 2:
+ splat.v4f[0] = vec_splat(from.v4f[1], 0);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ case 3:
+ splat.v4f[0] = vec_splat(from.v4f[1], 1);
+ splat.v4f[1] = splat.v4f[0];
+ break;
+ }
+ return splat;
+}
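
The emulated Packet4f is assumed to store its four floats widened to doubles in two Packet2d halves, which is why every case above splats within one half and then copies it to the other. A sketch of the assumed layout and semantics (not the verbatim declaration):

struct Packet4f_model { double v4f[2][2]; };  // v4f[0] = lanes 0..1, v4f[1] = lanes 2..3

static inline Packet4f_model splat_model(const Packet4f_model& from, int element) {
  Packet4f_model s;
  const double v = from.v4f[element / 2][element % 2];  // half and lane holding `element`
  s.v4f[0][0] = s.v4f[0][1] = s.v4f[1][0] = s.v4f[1][1] = v;
  return s;
}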
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_LOAD
+ Packet4f vfrom;
+ vfrom.v4f[0] = vec_ld2f(&from[0]);
+ vfrom.v4f[1] = vec_ld2f(&from[2]);
+ return vfrom;
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_STORE
+ vec_st2f(from.v4f[0], &to[0]);
+ vec_st2f(from.v4f[1], &to[2]);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+{
+ Packet4f to;
+ to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
+ to.v4f[1] = to.v4f[0];
+ return to;
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ a3 = pload<Packet4f>(a);
+ a0 = vec_splat_packet4f<0>(a3);
+ a1 = vec_splat_packet4f<1>(a3);
+ a2 = vec_splat_packet4f<2>(a3);
+ a3 = vec_splat_packet4f<3>(a3);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+ EIGEN_ALIGN16 float ai[4];
+ ai[0] = from[0*stride];
+ ai[1] = from[1*stride];
+ ai[2] = from[2*stride];
+ ai[3] = from[3*stride];
+ return pload<Packet4f>(ai);
+}
+
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
- float EIGEN_ALIGN16 ai[4];
+ EIGEN_ALIGN16 float ai[4];
pstore<float>((float *)ai, from);
to[0*stride] = ai[0];
to[1*stride] = ai[1];
@@ -487,15 +657,6 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, co
to[3*stride] = ai[3];
}
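
pgather and pscatter implement strided access by staging through an aligned scratch array; they are exact inverses, as in this usage sketch (hypothetical helper, using the declarations above):

static inline void copy_strided4(float* dst, const float* src, Index stride) {
  Packet4f p = pgather<float, Packet4f>(src, stride);  // src[0], src[stride], ...
  pscatter<float, Packet4f>(dst, p, stride);           // dst[0], dst[stride], ...
}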
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
-{
- double EIGEN_ALIGN16 af[2];
- pstore<double>(af, from);
- to[0*stride] = af[0];
- to[1*stride] = af[1];
-}
-
-template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a + b); }
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f c;
@@ -503,9 +664,7 @@ template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const
c.v4f[1] = a.v4f[1] + b.v4f[1];
return c;
}
-template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a + b); }
-template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a - b); }
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f c;
@@ -513,9 +672,7 @@ template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const
c.v4f[1] = a.v4f[1] - b.v4f[1];
return c;
}
-template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a - b); }
-template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a * b); }
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f c;
@@ -523,9 +680,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const
c.v4f[1] = a.v4f[1] * b.v4f[1];
return c;
}
-template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a * b); }
-template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a / b); }
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f c;
@@ -533,9 +688,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const
c.v4f[1] = a.v4f[1] / b.v4f[1];
return c;
}
-template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a / b); }
-template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return (-a); }
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
Packet4f c;
@@ -543,13 +696,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
c.v4f[1] = -a.v4f[1];
return c;
}
-template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return (-a); }
-
-template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
-template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
-template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
-template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Packet4i>(a, b), c); }
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
{
Packet4f res;
@@ -557,14 +704,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f&
res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);
return res;
}
-template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
-
-template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }
-template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }
-template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }
-template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
-template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f res;
@@ -573,8 +713,6 @@ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const
return res;
}
-template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
-template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f res;
@@ -583,8 +721,6 @@ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const
return res;
}
-template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
-template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f res;
@@ -593,28 +729,22 @@ template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const
return res;
}
-template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
-template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f res;
- res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
+ res.v4f[0] = por(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = por(a.v4f[1], b.v4f[1]);
return res;
}
-template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
-template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f res;
- res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
- res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
+ res.v4f[0] = pxor(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pxor(a.v4f[1], b.v4f[1]);
return res;
}
-template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return pand<Packet4i>(a, vec_nor(b, b)); }
-template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{
Packet4f res;
@@ -630,7 +760,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
res.v4f[1] = vec_round(a.v4f[1]);
return res;
}
-template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }
+
template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
{
Packet4f res;
@@ -638,7 +768,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
res.v4f[1] = vec_ceil(a.v4f[1]);
return res;
}
-template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return vec_ceil(a); }
+
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
{
Packet4f res;
@@ -646,18 +776,6 @@ template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
res.v4f[1] = vec_floor(a.v4f[1]);
return res;
}
-template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }
-
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { return pload<Packet4i>(from); }
-template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { return pload<Packet4f>(from); }
-template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { return pload<Packet2d>(from); }
-
-
-template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
-{
- Packet4i p = pload<Packet4i>(from);
- return vec_perm(p, p, p16uc_DUPLICATE32_HI);
-}
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
@@ -667,33 +785,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
return p;
}
-template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
-{
- Packet2d p = pload<Packet2d>(from);
- return vec_perm(p, p, p16uc_PSET64_HI);
-}
-
-template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { pstore<int>(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { pstore<float>(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { pstore<double>(to, from); }
-
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
-
-template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; pstore(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore(x, a); return x[0]; }
-
-template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
-{
- return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
-{
- return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));
-}
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{
@@ -703,8 +795,6 @@ template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
return rev;
}
-template<> EIGEN_STRONG_INLINE Packet4i pabs<Packet4i>(const Packet4i& a) { return vec_abs(a); }
-template<> EIGEN_STRONG_INLINE Packet2d pabs<Packet2d>(const Packet2d& a) { return vec_abs(a); }
template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)
{
Packet4f res;
@@ -713,23 +803,6 @@ template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)
return res;
}
-template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
-{
- Packet4i b, sum;
- b = vec_sld(a, a, 8);
- sum = padd<Packet4i>(a, b);
- b = vec_sld(sum, sum, 4);
- sum = padd<Packet4i>(sum, b);
- return pfirst(sum);
-}
-
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
-{
- Packet2d b, sum;
- b = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8));
- sum = padd<Packet2d>(a, b);
- return pfirst(sum);
-}
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
Packet2d sum;
@@ -738,94 +811,12 @@ template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
return static_cast<float>(first);
}
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
-{
- Packet4i v[4], sum[4];
-
- // It's easier and faster to transpose then add as columns
- // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
-
- // Now do the summation:
- // Lines 0+1
- sum[0] = padd<Packet4i>(sum[0], sum[1]);
- // Lines 2+3
- sum[1] = padd<Packet4i>(sum[2], sum[3]);
- // Add the results
- sum[0] = padd<Packet4i>(sum[0], sum[1]);
-
- return sum[0];
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
- Packet2d v[2], sum;
- v[0] = padd<Packet2d>(vecs[0], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(vecs[0]), reinterpret_cast<Packet4ui>(vecs[0]), 8)));
- v[1] = padd<Packet2d>(vecs[1], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(vecs[1]), reinterpret_cast<Packet4ui>(vecs[1]), 8)));
-
- sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v[0]), reinterpret_cast<Packet4ui>(v[1]), 8));
-
- return sum;
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
- PacketBlock<Packet4f,4> transpose;
- transpose.packet[0] = vecs[0];
- transpose.packet[1] = vecs[1];
- transpose.packet[2] = vecs[2];
- transpose.packet[3] = vecs[3];
- ptranspose(transpose);
-
- Packet4f sum = padd(transpose.packet[0], transpose.packet[1]);
- sum = padd(sum, transpose.packet[2]);
- sum = padd(sum, transpose.packet[3]);
- return sum;
-}
-
-// Other reduction functions:
-// mul
-template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
-{
- EIGEN_ALIGN16 int aux[4];
- pstore(aux, a);
- return aux[0] * aux[1] * aux[2] * aux[3];
-}
-
-template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
-{
- return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
-}
-
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
 // Return predux_mul<Packet2d> of the product of the two sub-vectors
return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));
}
-// min
-template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
-{
- Packet4i b, res;
- b = pmin<Packet4i>(a, vec_sld(a, a, 8));
- res = pmin<Packet4i>(b, vec_sld(b, b, 4));
- return pfirst(res);
-}
-
-template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
-{
- return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
-}
-
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
Packet2d b, res;
@@ -834,21 +825,6 @@ template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
return static_cast<float>(pfirst(res));
}
-// max
-template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
-{
- Packet4i b, res;
- b = pmax<Packet4i>(a, vec_sld(a, a, 8));
- res = pmax<Packet4i>(b, vec_sld(b, b, 4));
- return pfirst(res);
-}
-
-// max
-template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
-{
- return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
-}
-
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
Packet2d b, res;
@@ -857,26 +833,6 @@ template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
return static_cast<float>(pfirst(res));
}
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet4i,4>& kernel) {
- Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
- Packet4i t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
- Packet4i t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
- Packet4i t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
- kernel.packet[0] = vec_mergeh(t0, t2);
- kernel.packet[1] = vec_mergel(t0, t2);
- kernel.packet[2] = vec_mergeh(t1, t3);
- kernel.packet[3] = vec_mergel(t1, t3);
-}
-
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet2d,2>& kernel) {
- Packet2d t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI);
- Packet2d t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO);
- kernel.packet[0] = t0;
- kernel.packet[1] = t1;
-}
-
/* Split the Packet4f PacketBlock into 4 Packet2d PacketBlocks and transpose each one
*/
EIGEN_DEVICE_FUNC inline void
@@ -915,12 +871,6 @@ ptranspose(PacketBlock<Packet4f,4>& kernel) {
kernel.packet[3].v4f[1] = t3.packet[1];
}
-template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
- Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
- Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
- return vec_sel(elsePacket, thenPacket, mask);
-}
-
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] };
Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] };
@@ -932,12 +882,177 @@ template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, cons
return result;
}
-template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
- Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
- Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
+template<> Packet4f EIGEN_STRONG_INLINE pcmp_le<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pcmp_le(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pcmp_le(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> Packet4f EIGEN_STRONG_INLINE pcmp_lt<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pcmp_lt(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pcmp_lt(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+template<> Packet4f EIGEN_STRONG_INLINE pcmp_eq<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f res;
+ res.v4f[0] = pcmp_eq(a.v4f[0], b.v4f[0]);
+ res.v4f[1] = pcmp_eq(a.v4f[1], b.v4f[1]);
+ return res;
+}
+
+#else
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_LOAD
+ Packet *vfrom;
+ vfrom = (Packet *) from;
+ return vfrom->v4f;
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{
+ // FIXME: No intrinsic yet
+ EIGEN_DEBUG_ALIGNED_STORE
+ Packet *vto;
+ vto = (Packet *) to;
+ vto->v4f = from;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+{
+ return vec_splats(from);
+}
+
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+ a3 = pload<Packet4f>(a);
+ a0 = vec_splat(a3, 0);
+ a1 = vec_splat(a3, 1);
+ a2 = vec_splat(a3, 2);
+ a3 = vec_splat(a3, 3);
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+{
+ EIGEN_ALIGN16 float af[4];
+ af[0] = from[0*stride];
+ af[1] = from[1*stride];
+ af[2] = from[2*stride];
+ af[3] = from[3*stride];
+ return pload<Packet4f>(af);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+{
+ EIGEN_ALIGN16 float af[4];
+ pstore<float>((float*)af, from);
+ to[0*stride] = af[0];
+ to[1*stride] = af[1];
+ to[2*stride] = af[2];
+ to[3*stride] = af[3];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a + b); }
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a - b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a * b); }
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a / b); }
+template<> EIGEN_STRONG_INLINE Packet4f pnegate<Packet4f>(const Packet4f& a) { return (-a); }
+template<> EIGEN_STRONG_INLINE Packet4f pconj<Packet4f> (const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4f pmadd<Packet4f> (const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); }
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f> (const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f> (const Packet4f& a) { return vec_round(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f> (const Packet4f& a) { return vec_ceil(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f> (const Packet4f& a) { return vec_floor(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f> (const Packet4f& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x[4]; pstore(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ Packet4f p = pload<Packet4f>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{
+ return reinterpret_cast<Packet4f>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+}
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, sum;
+ b = vec_sld(a, a, 8);
+ sum = padd<Packet4f>(a, b);
+ b = vec_sld(sum, sum, 4);
+ sum = padd<Packet4f>(sum, b);
+ return pfirst(sum);
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+ Packet4f prod;
+ prod = pmul(a, vec_sld(a, a, 8));
+ return pfirst(pmul(prod, vec_sld(prod, prod, 4)));
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, res;
+ b = pmin<Packet4f>(a, vec_sld(a, a, 8));
+ res = pmin<Packet4f>(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, res;
+ b = pmax<Packet4f>(a, vec_sld(a, a, 8));
+ res = pmax<Packet4f>(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet4f,4>& kernel) {
+ Packet4f t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+ Packet4f t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+ Packet4f t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+ Packet4f t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+ kernel.packet[0] = vec_mergeh(t0, t2);
+ kernel.packet[1] = vec_mergel(t0, t2);
+ kernel.packet[2] = vec_mergeh(t1, t3);
+ kernel.packet[3] = vec_mergel(t1, t3);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+ Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
return vec_sel(elsePacket, thenPacket, mask);
}
+#endif
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f> (const float* from) { return pload<Packet4f>(from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { pstore<float>(to, from); }
+template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f> (const float& a) { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/functors/AssignmentFunctors.h b/examples/ThirdPartyLibs/Eigen/src/Core/functors/AssignmentFunctors.h
index 1077d8eb0..bf64ef4ed 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/functors/AssignmentFunctors.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/functors/AssignmentFunctors.h
@@ -144,7 +144,7 @@ template<typename Scalar> struct swap_assign_op {
EIGEN_EMPTY_STRUCT_CTOR(swap_assign_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const
{
-#ifdef EIGEN_CUDACC
+#ifdef EIGEN_GPUCC
// FIXME is there some kind of cuda::swap?
Scalar t=b; const_cast<Scalar&>(b)=a; a=t;
#else
@@ -157,7 +157,16 @@ template<typename Scalar>
struct functor_traits<swap_assign_op<Scalar> > {
enum {
Cost = 3 * NumTraits<Scalar>::ReadCost,
- PacketAccess = packet_traits<Scalar>::Vectorizable
+ PacketAccess =
+ #if defined(EIGEN_VECTORIZE_AVX) && EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<800 || defined(__apple_build_version__))
+ // This is a partial workaround for a bug in clang generating bad code
+ // when mixing 256/512-bit loads and 128-bit moves.
+ // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1684
+ // https://bugs.llvm.org/show_bug.cgi?id=40815
+ 0
+ #else
+ packet_traits<Scalar>::Vectorizable
+ #endif
};
};
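
Forcing PacketAccess to 0 makes the assignment evaluator take the scalar path, so under the affected clang versions each coefficient swap compiles to a plain three-move sequence instead of mixed-width vector code; a scalar model of that fallback (hypothetical helper):

template<typename Scalar>
static inline void scalar_swap_model(Scalar& a, Scalar& b) {
  Scalar t = b;  // the three moves the scalar fallback performs
  b = a;
  a = t;
}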
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/functors/BinaryFunctors.h b/examples/ThirdPartyLibs/Eigen/src/Core/functors/BinaryFunctors.h
index 96747bac7..63f09ab93 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/functors/BinaryFunctors.h
@@ -39,32 +39,26 @@ struct scalar_sum_op : binary_op_base<LhsScalar,RhsScalar>
EIGEN_SCALAR_BINARY_OP_PLUGIN
}
#endif
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const
{ return internal::padd(a,b); }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const
{ return internal::predux(a); }
};
template<typename LhsScalar,typename RhsScalar>
struct functor_traits<scalar_sum_op<LhsScalar,RhsScalar> > {
enum {
- Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2, // rough estimate!
+ Cost = (int(NumTraits<LhsScalar>::AddCost) + int(NumTraits<RhsScalar>::AddCost)) / 2, // rough estimate!
PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasAdd && packet_traits<RhsScalar>::HasAdd
// TODO vectorize mixed sum
};
};
-/** \internal
- * \brief Template specialization to deprecate the summation of boolean expressions.
- * This is required to solve Bug 426.
- * \sa DenseBase::count(), DenseBase::any(), ArrayBase::cast(), MatrixBase::cast()
- */
-template<> struct scalar_sum_op<bool,bool> : scalar_sum_op<int,int> {
- EIGEN_DEPRECATED
- scalar_sum_op() {}
-};
+
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool scalar_sum_op<bool,bool>::operator() (const bool& a, const bool& b) const { return a || b; }
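
With this specialization the "sum" of booleans is logical OR, so reducing a boolean expression with scalar_sum_op<bool,bool> answers whether any coefficient is true; a minimal usage sketch (hypothetical helper):

static inline bool any_of3_model(bool a, bool b, bool c) {
  scalar_sum_op<bool, bool> op;  // bool "a + b" is defined as a || b
  return op(op(a, b), c);
}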
/** \internal
@@ -83,23 +77,27 @@ struct scalar_product_op : binary_op_base<LhsScalar,RhsScalar>
EIGEN_SCALAR_BINARY_OP_PLUGIN
}
#endif
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const
{ return internal::pmul(a,b); }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const
{ return internal::predux_mul(a); }
};
template<typename LhsScalar,typename RhsScalar>
struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
enum {
- Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
+ Cost = (int(NumTraits<LhsScalar>::MulCost) + int(NumTraits<RhsScalar>::MulCost))/2, // rough estimate!
PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
// TODO vectorize mixed product
};
};
+template<>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool scalar_product_op<bool,bool>::operator() (const bool& a, const bool& b) const { return a && b; }
+
+
/** \internal
* \brief Template functor to compute the conjugate product of two scalars
*
@@ -116,11 +114,11 @@ struct scalar_conj_product_op : binary_op_base<LhsScalar,RhsScalar>
typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_conj_product_op>::ReturnType result_type;
EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const
{ return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const
{ return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
};
template<typename LhsScalar,typename RhsScalar>
@@ -136,21 +134,28 @@ struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
*
* \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()
*/
-template<typename LhsScalar,typename RhsScalar>
+template<typename LhsScalar,typename RhsScalar, int NaNPropagation>
struct scalar_min_op : binary_op_base<LhsScalar,RhsScalar>
{
typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_min_op>::ReturnType result_type;
EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return numext::mini(a, b); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const {
+ return internal::pmin<NaNPropagation>(a, b);
+ }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
- { return internal::pmin(a,b); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const
+ {
+ return internal::pmin<NaNPropagation>(a,b);
+ }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
- { return internal::predux_min(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const
+ {
+ return internal::predux_min<NaNPropagation>(a);
+ }
};
-template<typename LhsScalar,typename RhsScalar>
-struct functor_traits<scalar_min_op<LhsScalar,RhsScalar> > {
+
+template<typename LhsScalar,typename RhsScalar, int NaNPropagation>
+struct functor_traits<scalar_min_op<LhsScalar,RhsScalar, NaNPropagation> > {
enum {
Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMin
@@ -162,21 +167,28 @@ struct functor_traits<scalar_min_op<LhsScalar,RhsScalar> > {
*
* \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()
*/
-template<typename LhsScalar,typename RhsScalar>
-struct scalar_max_op : binary_op_base<LhsScalar,RhsScalar>
+template<typename LhsScalar,typename RhsScalar, int NaNPropagation>
+struct scalar_max_op : binary_op_base<LhsScalar,RhsScalar>
{
typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_max_op>::ReturnType result_type;
EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return numext::maxi(a, b); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const {
+ return internal::pmax<NaNPropagation>(a,b);
+ }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
- { return internal::pmax(a,b); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const
+ {
+ return internal::pmax<NaNPropagation>(a,b);
+ }
template<typename Packet>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
- { return internal::predux_max(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const
+ {
+ return internal::predux_max<NaNPropagation>(a);
+ }
};
-template<typename LhsScalar,typename RhsScalar>
-struct functor_traits<scalar_max_op<LhsScalar,RhsScalar> > {
+
+template<typename LhsScalar,typename RhsScalar, int NaNPropagation>
+struct functor_traits<scalar_max_op<LhsScalar,RhsScalar, NaNPropagation> > {
enum {
Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMax
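
The min/max functors now carry a NaNPropagation template parameter that is forwarded to pmin/pmax and the predux reductions; the modes in this Eigen version are presumably PropagateFast, PropagateNumbers and PropagateNaN. A scalar model of the two well-defined modes (hypothetical helper):

#include <cmath>

static inline double min_model(double a, double b, bool propagate_nan) {
  if (std::isnan(a)) return propagate_nan ? a : b;  // PropagateNaN vs PropagateNumbers
  if (std::isnan(b)) return propagate_nan ? b : a;
  return a < b ? a : b;
}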
@@ -253,9 +265,8 @@ struct scalar_cmp_op<LhsScalar,RhsScalar, cmp_NEQ> : binary_op_base<LhsScalar,Rh
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a!=b;}
};
-
/** \internal
- * \brief Template functor to compute the hypot of two scalars
+ * \brief Template functor to compute the hypot of two \b positive and \b real scalars
*
* \sa MatrixBase::stableNorm(), class Redux
*/
@@ -263,22 +274,15 @@ template<typename Scalar>
struct scalar_hypot_op<Scalar,Scalar> : binary_op_base<Scalar,Scalar>
{
EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
-// typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar &x, const Scalar &y) const
{
- EIGEN_USING_STD_MATH(sqrt)
- Scalar p, qp;
- if(_x>_y)
- {
- p = _x;
- qp = _y / p;
- }
- else
- {
- p = _y;
- qp = _x / p;
- }
- return p * sqrt(Scalar(1) + qp*qp);
+ // This functor is used by hypotNorm only for which it is faster to first apply abs
+ // on all coefficients prior to reduction through hypot.
+ // This way we avoid calling abs on positive and real entries, and this also permits
+ // to seamlessly handle complexes. Otherwise we would have to handle both real and complexes
+ // through the same functor...
+ return internal::positive_real_hypot(x,y);
}
};
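
positive_real_hypot is assumed to compute the same overflow-safe form as the scalar branch removed above: factor out the larger operand so the squared ratio stays bounded. A sketch under that assumption:

#include <algorithm>
#include <cmath>

static inline double positive_real_hypot_model(double x, double y) {  // x, y >= 0
  const double p = std::max(x, y);
  const double q = std::min(x, y);
  if (p == 0.0) return 0.0;           // avoid 0/0
  const double r = q / p;
  return p * std::sqrt(1.0 + r * r);  // never squares a huge value
}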
template<typename Scalar>
@@ -294,6 +298,7 @@ struct functor_traits<scalar_hypot_op<Scalar,Scalar> > {
/** \internal
* \brief Template functor to compute the pow of two scalars
+ * See the specification of pow in https://en.cppreference.com/w/cpp/numeric/math/pow
*/
template<typename Scalar, typename Exponent>
struct scalar_pow_op : binary_op_base<Scalar,Exponent>
@@ -308,16 +313,31 @@ struct scalar_pow_op : binary_op_base<Scalar,Exponent>
EIGEN_SCALAR_BINARY_OP_PLUGIN
}
#endif
+
EIGEN_DEVICE_FUNC
inline result_type operator() (const Scalar& a, const Exponent& b) const { return numext::pow(a, b); }
+
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ {
+ return generic_pow(a,b);
+ }
};
+
template<typename Scalar, typename Exponent>
struct functor_traits<scalar_pow_op<Scalar,Exponent> > {
- enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = (!NumTraits<Scalar>::IsComplex && !NumTraits<Scalar>::IsInteger &&
+ packet_traits<Scalar>::HasExp && packet_traits<Scalar>::HasLog &&
+ packet_traits<Scalar>::HasRound && packet_traits<Scalar>::HasCmp &&
+ // Temporarily disable packet access for half/bfloat16 until
+ // accuracy is improved.
+ !is_same<Scalar, half>::value && !is_same<Scalar, bfloat16>::value
+ )
+ };
};
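
PacketAccess now requires vectorized exp, log, round and compare because generic_pow presumably evaluates pow through the exp/log identity, using round/cmp for special cases; a rough scalar model of the assumed fast path:

#include <cmath>

static inline double pow_fastpath_model(double a, double b) {
  // valid for a > 0; zero or negative bases, inf and nan need the cmp/round
  // special-case handling the packet kernel is assumed to perform.
  return std::exp(b * std::log(a));
}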
-
-
//---------- non associative binary functors ----------
/** \internal
@@ -344,7 +364,7 @@ struct scalar_difference_op : binary_op_base<LhsScalar,RhsScalar>
template<typename LhsScalar,typename RhsScalar>
struct functor_traits<scalar_difference_op<LhsScalar,RhsScalar> > {
enum {
- Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
+ Cost = (int(NumTraits<LhsScalar>::AddCost) + int(NumTraits<RhsScalar>::AddCost)) / 2,
PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasSub && packet_traits<RhsScalar>::HasSub
};
};
@@ -389,11 +409,14 @@ struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
struct scalar_boolean_and_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pand(a,b); }
};
template<> struct functor_traits<scalar_boolean_and_op> {
enum {
Cost = NumTraits<bool>::AddCost,
- PacketAccess = false
+ PacketAccess = true
};
};
@@ -405,11 +428,14 @@ template<> struct functor_traits<scalar_boolean_and_op> {
struct scalar_boolean_or_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::por(a,b); }
};
template<> struct functor_traits<scalar_boolean_or_op> {
enum {
Cost = NumTraits<bool>::AddCost,
- PacketAccess = false
+ PacketAccess = true
};
};
@@ -421,11 +447,44 @@ template<> struct functor_traits<scalar_boolean_or_op> {
struct scalar_boolean_xor_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_xor_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a ^ b; }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pxor(a,b); }
};
template<> struct functor_traits<scalar_boolean_xor_op> {
enum {
Cost = NumTraits<bool>::AddCost,
- PacketAccess = false
+ PacketAccess = true
+ };
+};
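
All three boolean functors gain a packetOp mapped to the bitwise primitives pand/por/pxor; assuming bool lanes are encoded as 0x00/0x01, the bitwise ops coincide lane-by-lane with the logical scalar ops:

static inline bool and_model(bool a, bool b) { return (a & b) != 0; }  // == (a && b)
static inline bool or_model (bool a, bool b) { return (a | b) != 0; }  // == (a || b)
static inline bool xor_model(bool a, bool b) { return (a ^ b) != 0; }  // == (a != b)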
+
+/** \internal
+ * \brief Template functor to compute the absolute difference of two scalars
+ *
+ * \sa class CwiseBinaryOp, MatrixBase::absolute_difference
+ */
+template<typename LhsScalar,typename RhsScalar>
+struct scalar_absolute_difference_op : binary_op_base<LhsScalar,RhsScalar>
+{
+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_absolute_difference_op>::ReturnType result_type;
+#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_absolute_difference_op)
+#else
+ scalar_absolute_difference_op() {
+ EIGEN_SCALAR_BINARY_OP_PLUGIN
+ }
+#endif
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
+ { return numext::absdiff(a,b); }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pabsdiff(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_absolute_difference_op<LhsScalar,RhsScalar> > {
+ enum {
+ Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,
+ PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasAbsDiff
};
};
@@ -443,7 +502,7 @@ template<typename BinaryOp> struct bind1st_op : BinaryOp {
typedef typename BinaryOp::second_argument_type second_argument_type;
typedef typename BinaryOp::result_type result_type;
- bind1st_op(const first_argument_type &val) : m_value(val) {}
+ EIGEN_DEVICE_FUNC explicit bind1st_op(const first_argument_type &val) : m_value(val) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const second_argument_type& b) const { return BinaryOp::operator()(m_value,b); }
@@ -462,7 +521,7 @@ template<typename BinaryOp> struct bind2nd_op : BinaryOp {
typedef typename BinaryOp::second_argument_type second_argument_type;
typedef typename BinaryOp::result_type result_type;
- bind2nd_op(const second_argument_type &val) : m_value(val) {}
+ EIGEN_DEVICE_FUNC explicit bind2nd_op(const second_argument_type &val) : m_value(val) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const first_argument_type& a) const { return BinaryOp::operator()(a,m_value); }
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/functors/NullaryFunctors.h b/examples/ThirdPartyLibs/Eigen/src/Core/functors/NullaryFunctors.h
index b03be0269..192f225dd 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/functors/NullaryFunctors.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/functors/NullaryFunctors.h
@@ -37,26 +37,27 @@ template<typename Scalar>
struct functor_traits<scalar_identity_op<Scalar> >
{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
-template <typename Scalar, typename Packet, bool IsInteger> struct linspaced_op_impl;
+template <typename Scalar, bool IsInteger> struct linspaced_op_impl;
-template <typename Scalar, typename Packet>
-struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,/*IsInteger*/false>
{
- linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
- m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ EIGEN_DEVICE_FUNC linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
+ m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : Scalar((high-low)/RealScalar(num_steps-1))),
m_flip(numext::abs(high)<numext::abs(low))
{}
template<typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const {
- typedef typename NumTraits<Scalar>::Real RealScalar;
if(m_flip)
- return (i==0)? m_low : (m_high - RealScalar(m_size1-i)*m_step);
+ return (i==0)? m_low : Scalar(m_high - RealScalar(m_size1-i)*m_step);
else
- return (i==m_size1)? m_high : (m_low + RealScalar(i)*m_step);
+ return (i==m_size1)? m_high : Scalar(m_low + RealScalar(i)*m_step);
}
- template<typename IndexType>
+ template<typename Packet, typename IndexType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const
{
// Principle:
@@ -65,17 +66,17 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
{
Packet pi = plset<Packet>(Scalar(i-m_size1));
Packet res = padd(pset1<Packet>(m_high), pmul(pset1<Packet>(m_step), pi));
- if(i==0)
- res = pinsertfirst(res, m_low);
- return res;
+ if (EIGEN_PREDICT_TRUE(i != 0)) return res;
+ Packet mask = pcmp_lt(pset1<Packet>(0), plset<Packet>(0));
+ return pselect<Packet>(mask, res, pset1<Packet>(m_low));
}
else
{
Packet pi = plset<Packet>(Scalar(i));
Packet res = padd(pset1<Packet>(m_low), pmul(pset1<Packet>(m_step), pi));
- if(i==m_size1-unpacket_traits<Packet>::size+1)
- res = pinsertlast(res, m_high);
- return res;
+ if(EIGEN_PREDICT_TRUE(i != m_size1-unpacket_traits<Packet>::size+1)) return res;
+ Packet mask = pcmp_lt(plset<Packet>(0), pset1<Packet>(unpacket_traits<Packet>::size-1));
+ return pselect<Packet>(mask, res, pset1<Packet>(m_high));
}
}
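
The replaced pinsertfirst/pinsertlast calls are now expressed as a compare mask plus pselect: for the low end the mask is (0 < {0,1,2,...}), which fails only in lane 0, so pselect keeps the computed values everywhere else and substitutes m_low there. A scalar model (hypothetical helper):

static inline void fixup_first_lane_model(double* res, int lanes, double low) {
  for (int i = 0; i < lanes; ++i)
    if (!(0 < i)) res[i] = low;  // only lane 0 takes the endpoint value
}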
@@ -86,10 +87,10 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>
const bool m_flip;
};
-template <typename Scalar, typename Packet>
-struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,/*IsInteger*/true>
{
- linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
+ EIGEN_DEVICE_FUNC linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :
m_low(low),
m_multiplier((high-low)/convert_index<Scalar>(num_steps<=1 ? 1 : num_steps-1)),
m_divisor(convert_index<Scalar>((high>=low?num_steps:-num_steps)+(high-low))/((numext::abs(high-low)+1)==0?1:(numext::abs(high-low)+1))),
@@ -115,8 +116,8 @@ struct linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>
// Forward declaration (we default to random access which does not really give
// us a speed gain when using packet access but it allows to use the functor in
// nested expressions).
-template <typename Scalar, typename PacketType> struct linspaced_op;
-template <typename Scalar, typename PacketType> struct functor_traits< linspaced_op<Scalar,PacketType> >
+template <typename Scalar> struct linspaced_op;
+template <typename Scalar> struct functor_traits< linspaced_op<Scalar> >
{
enum
{
@@ -126,9 +127,9 @@ template <typename Scalar, typename PacketType> struct functor_traits< linspaced
IsRepeatable = true
};
};
-template <typename Scalar, typename PacketType> struct linspaced_op
+template <typename Scalar> struct linspaced_op
{
- linspaced_op(const Scalar& low, const Scalar& high, Index num_steps)
+ EIGEN_DEVICE_FUNC linspaced_op(const Scalar& low, const Scalar& high, Index num_steps)
: impl((num_steps==1 ? high : low),high,num_steps)
{}
@@ -136,11 +137,11 @@ template <typename Scalar, typename PacketType> struct linspaced_op
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { return impl(i); }
template<typename Packet,typename IndexType>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.packetOp(i); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.template packetOp<Packet>(i); }
// This proxy object handles the actual required temporaries and the different
// implementations (integer vs. floating point).
- const linspaced_op_impl<Scalar,PacketType,NumTraits<Scalar>::IsInteger> impl;
+ const linspaced_op_impl<Scalar,NumTraits<Scalar>::IsInteger> impl;
};
// Linear access is automatically determined from the operator() prototypes available for the given functor.
@@ -166,12 +167,12 @@ struct has_unary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value =
template<typename Scalar,typename IndexType>
struct has_binary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 1}; };
-template<typename Scalar, typename PacketType,typename IndexType>
-struct has_nullary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };
-template<typename Scalar, typename PacketType,typename IndexType>
-struct has_unary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 1}; };
-template<typename Scalar, typename PacketType,typename IndexType>
-struct has_binary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_nullary_operator<linspaced_op<Scalar>,IndexType> { enum { value = 0}; };
+template<typename Scalar,typename IndexType>
+struct has_unary_operator<linspaced_op<Scalar>,IndexType> { enum { value = 1}; };
+template<typename Scalar,typename IndexType>
+struct has_binary_operator<linspaced_op<Scalar>,IndexType> { enum { value = 0}; };
template<typename Scalar,typename IndexType>
struct has_nullary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 1}; };
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/functors/StlFunctors.h b/examples/ThirdPartyLibs/Eigen/src/Core/functors/StlFunctors.h
index 6df3fa501..4570c9b63 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/functors/StlFunctors.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/functors/StlFunctors.h
@@ -12,6 +12,28 @@
namespace Eigen {
+// Portable replacements for certain functors.
+namespace numext {
+
+template<typename T = void>
+struct equal_to {
+ typedef bool result_type;
+ EIGEN_DEVICE_FUNC bool operator()(const T& lhs, const T& rhs) const {
+ return lhs == rhs;
+ }
+};
+
+template<typename T = void>
+struct not_equal_to {
+ typedef bool result_type;
+ EIGEN_DEVICE_FUNC bool operator()(const T& lhs, const T& rhs) const {
+ return lhs != rhs;
+ }
+};
+
+}
+
+
namespace internal {
// default functor traits for STL functors:
@@ -69,10 +91,18 @@ struct functor_traits<std::equal_to<T> >
{ enum { Cost = 1, PacketAccess = false }; };
template<typename T>
+struct functor_traits<numext::equal_to<T> >
+ : functor_traits<std::equal_to<T> > {};
+
+template<typename T>
struct functor_traits<std::not_equal_to<T> >
{ enum { Cost = 1, PacketAccess = false }; };
-#if (__cplusplus < 201103L) && (EIGEN_COMP_MSVC <= 1900)
+template<typename T>
+struct functor_traits<numext::not_equal_to<T> >
+ : functor_traits<std::not_equal_to<T> > {};
+
+#if (EIGEN_COMP_CXXVER < 11)
// std::binder* are deprecated since c++11 and will be removed in c++17
template<typename T>
struct functor_traits<std::binder2nd<T> >
@@ -83,13 +113,17 @@ struct functor_traits<std::binder1st<T> >
{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
#endif
+#if (EIGEN_COMP_CXXVER < 17)
+// std::unary_negate is deprecated since c++17 and will be removed in c++20
template<typename T>
struct functor_traits<std::unary_negate<T> >
{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+// std::binary_negate is deprecated since c++17 and will be removed in c++20
template<typename T>
struct functor_traits<std::binary_negate<T> >
{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+#endif
#ifdef EIGEN_STDEXT_SUPPORT
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/functors/UnaryFunctors.h b/examples/ThirdPartyLibs/Eigen/src/Core/functors/UnaryFunctors.h
index bfc046556..16136d185 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/functors/UnaryFunctors.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/functors/UnaryFunctors.h
@@ -109,7 +109,7 @@ struct functor_traits<scalar_abs2_op<Scalar> >
template<typename Scalar> struct scalar_conjugate_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
EIGEN_DEVICE_FUNC
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::conj(a); }
template<typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
};
@@ -117,7 +117,15 @@ template<typename Scalar>
struct functor_traits<scalar_conjugate_op<Scalar> >
{
enum {
- Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
+ Cost = 0,
+ // Yes the cost is zero even for complexes because in most cases for which
+ // the cost is used, conjugation turns out to be a no-op. Some examples:
+ // cost(a*conj(b)) == cost(a*b)
+ // cost(a+conj(b)) == cost(a+b)
+ // etc.
+ // If we don't set it to zero, then:
+ // A.conjugate().lazyProduct(B.conjugate())
+ // will bake its operands. We definitely don't want that!
PacketAccess = packet_traits<Scalar>::HasConj
};
};
@@ -130,7 +138,7 @@ struct functor_traits<scalar_conjugate_op<Scalar> >
template<typename Scalar> struct scalar_arg_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_arg_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using numext::arg; return arg(a); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::arg(a); }
template<typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
{ return internal::parg(a); }
@@ -159,6 +167,44 @@ struct functor_traits<scalar_cast_op<Scalar,NewType> >
{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
/** \internal
+ * \brief Template functor to arithmetically shift a scalar right by a number of bits
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::shift_right()
+ */
+template<typename Scalar, int N>
+struct scalar_shift_right_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_shift_right_op)
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const
+ { return a >> N; }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::parithmetic_shift_right<N>(a); }
+};
+template<typename Scalar, int N>
+struct functor_traits<scalar_shift_right_op<Scalar,N> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasShift }; };
+
+/** \internal
+ * \brief Template functor to logically shift a scalar left by a number of bits
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::shift_left()
+ */
+template<typename Scalar, int N>
+struct scalar_shift_left_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_shift_left_op)
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const
+ { return a << N; }
+ template<typename Packet>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::plogical_shift_left<N>(a); }
+};
+template<typename Scalar, int N>
+struct functor_traits<scalar_shift_left_op<Scalar,N> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasShift }; };
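
A usage sketch for the two shift functors (the -8 result assumes the usual two's-complement arithmetic right shift):

static inline void shift_examples_model() {
  const int r = scalar_shift_right_op<int, 2>()(-8);  // -2: arithmetic shift keeps the sign
  const int l = scalar_shift_left_op<int, 2>()(3);    // 12: logical left shift
  (void)r; (void)l;
}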
+
+/** \internal
* \brief Template functor to extract the real part of a complex
*
* \sa class CwiseUnaryOp, MatrixBase::real()
@@ -341,7 +387,7 @@ struct functor_traits<scalar_log1p_op<Scalar> > {
*/
template<typename Scalar> struct scalar_log10_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_log10_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { EIGEN_USING_STD_MATH(log10) return log10(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { EIGEN_USING_STD(log10) return log10(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog10(a); }
};
@@ -350,6 +396,22 @@ struct functor_traits<scalar_log10_op<Scalar> >
{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog10 }; };
/** \internal
+ *
+ * \brief Template functor to compute the base-2 logarithm of a scalar
+ *
+ * \sa class CwiseUnaryOp, Cwise::log2()
+ */
+template<typename Scalar> struct scalar_log2_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_log2_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return Scalar(EIGEN_LOG2E) * numext::log(a); }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog2(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_log2_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; };
+
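scalar_log2_op relies on the identity log2(a) = log2(e) * ln(a), with EIGEN_LOG2E presumably holding the log2(e) constant. A standalone sanity check of that identity, with the constant written out:

    #include <cassert>
    #include <cmath>
    int main() {
      const double log2e = 1.4426950408889634;  // log2(e) == 1/ln(2)
      const double a = 10.0;
      assert(std::abs(log2e * std::log(a) - std::log2(a)) < 1e-12);
      return 0;
    }
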
+/** \internal
* \brief Template functor to compute the square root of a scalar
* \sa class CwiseUnaryOp, Cwise::sqrt()
*/
@@ -376,13 +438,25 @@ struct functor_traits<scalar_sqrt_op<Scalar> > {
};
};
+// Boolean specialization to eliminate -Wimplicit-conversion-floating-point-to-bool warnings.
+template<> struct scalar_sqrt_op<bool> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline bool operator() (const bool& a) const { return a; }
+ template <typename Packet>
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return a; }
+};
+template <>
+struct functor_traits<scalar_sqrt_op<bool> > {
+ enum { Cost = 1, PacketAccess = packet_traits<bool>::Vectorizable };
+};
+
/** \internal
* \brief Template functor to compute the reciprocal square root of a scalar
* \sa class CwiseUnaryOp, Cwise::rsqrt()
*/
template<typename Scalar> struct scalar_rsqrt_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_rsqrt_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return Scalar(1)/numext::sqrt(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::rsqrt(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::prsqrt(a); }
};
@@ -548,6 +622,23 @@ struct functor_traits<scalar_tanh_op<Scalar> > {
};
};
+#if EIGEN_HAS_CXX11_MATH
+/** \internal
+ * \brief Template functor to compute the atanh of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::atanh()
+ */
+template <typename Scalar>
+struct scalar_atanh_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_atanh_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::atanh(a); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_atanh_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+#endif
+
/** \internal
* \brief Template functor to compute the sinh of a scalar
* \sa class CwiseUnaryOp, ArrayBase::sinh()
@@ -567,6 +658,23 @@ struct functor_traits<scalar_sinh_op<Scalar> >
};
};
+#if EIGEN_HAS_CXX11_MATH
+/** \internal
+ * \brief Template functor to compute the asinh of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::asinh()
+ */
+template <typename Scalar>
+struct scalar_asinh_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_asinh_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::asinh(a); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_asinh_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+#endif
+
/** \internal
* \brief Template functor to compute the cosh of a scalar
* \sa class CwiseUnaryOp, ArrayBase::cosh()
@@ -586,6 +694,23 @@ struct functor_traits<scalar_cosh_op<Scalar> >
};
};
+#if EIGEN_HAS_CXX11_MATH
+/** \internal
+ * \brief Template functor to compute the acosh of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::acosh()
+ */
+template <typename Scalar>
+struct scalar_acosh_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_acosh_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::acosh(a); }
+};
+
+template <typename Scalar>
+struct functor_traits<scalar_acosh_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+#endif
+
/** \internal
* \brief Template functor to compute the inverse of a scalar
* \sa class CwiseUnaryOp, Cwise::inverse()
@@ -598,9 +723,13 @@ struct scalar_inverse_op {
EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
{ return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
};
-template<typename Scalar>
-struct functor_traits<scalar_inverse_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+template <typename Scalar>
+struct functor_traits<scalar_inverse_op<Scalar> > {
+ enum {
+ PacketAccess = packet_traits<Scalar>::HasDiv,
+ Cost = scalar_div_cost<Scalar, PacketAccess>::value
+ };
+};
/** \internal
* \brief Template functor to compute the square of a scalar
@@ -618,6 +747,19 @@ template<typename Scalar>
struct functor_traits<scalar_square_op<Scalar> >
{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+// Boolean specialization to avoid -Wint-in-bool-context warnings on GCC.
+template<>
+struct scalar_square_op<bool> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline bool operator() (const bool& a) const { return a; }
+ template<typename Packet>
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
+ { return a; }
+};
+template<>
+struct functor_traits<scalar_square_op<bool> >
+{ enum { Cost = 0, PacketAccess = packet_traits<bool>::Vectorizable }; };
+
/** \internal
* \brief Template functor to compute the cube of a scalar
* \sa class CwiseUnaryOp, Cwise::cube()
@@ -634,6 +776,19 @@ template<typename Scalar>
struct functor_traits<scalar_cube_op<Scalar> >
{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+// Boolean specialization to avoid -Wint-in-bool-context warnings on GCC.
+template<>
+struct scalar_cube_op<bool> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline bool operator() (const bool& a) const { return a; }
+ template<typename Packet>
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const
+ { return a; }
+};
+template<>
+struct functor_traits<scalar_cube_op<bool> >
+{ enum { Cost = 0, PacketAccess = packet_traits<bool>::Vectorizable }; };
+
/** \internal
* \brief Template functor to compute the rounded value of a scalar
* \sa class CwiseUnaryOp, ArrayBase::round()
@@ -673,6 +828,25 @@ struct functor_traits<scalar_floor_op<Scalar> >
};
/** \internal
+ * \brief Template functor to compute the rounded (with current rounding mode) value of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::rint()
+ */
+template<typename Scalar> struct scalar_rint_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_rint_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::rint(a); }
+ template <typename Packet>
+ EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::print(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_rint_op<Scalar> >
+{
+ enum {
+ Cost = NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasRint
+ };
+};
+
+/** \internal
* \brief Template functor to compute the ceil of a scalar
* \sa class CwiseUnaryOp, ArrayBase::ceil()
*/
@@ -699,9 +873,9 @@ template<typename Scalar> struct scalar_isnan_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_isnan_op)
typedef bool result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
-#if defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
return numext::isnan(a);
-#else
+#else
return (numext::isnan)(a);
#endif
}
@@ -723,7 +897,7 @@ template<typename Scalar> struct scalar_isinf_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_isinf_op)
typedef bool result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
-#if defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
return numext::isinf(a);
#else
return (numext::isinf)(a);
@@ -747,7 +921,7 @@ template<typename Scalar> struct scalar_isfinite_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_isfinite_op)
typedef bool result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
-#if defined(__SYCL_DEVICE_ONLY__)
+#if defined(SYCL_DEVICE_ONLY)
return numext::isfinite(a);
#else
return (numext::isfinite)(a);
@@ -784,9 +958,9 @@ struct functor_traits<scalar_boolean_not_op<Scalar> > {
* \brief Template functor to compute the signum of a scalar
* \sa class CwiseUnaryOp, Cwise::sign()
*/
-template<typename Scalar,bool iscpx=(NumTraits<Scalar>::IsComplex!=0) > struct scalar_sign_op;
+template<typename Scalar,bool is_complex=(NumTraits<Scalar>::IsComplex!=0), bool is_integer=(NumTraits<Scalar>::IsInteger!=0) > struct scalar_sign_op;
template<typename Scalar>
-struct scalar_sign_op<Scalar,false> {
+struct scalar_sign_op<Scalar, false, true> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)
EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const
{
@@ -796,8 +970,21 @@ struct scalar_sign_op<Scalar,false> {
//template <typename Packet>
//EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); }
};
+
template<typename Scalar>
-struct scalar_sign_op<Scalar,true> {
+struct scalar_sign_op<Scalar, false, false> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const
+ {
+ return (numext::isnan)(a) ? a : Scalar( (a>Scalar(0)) - (a<Scalar(0)) );
+ }
+ //TODO
+ //template <typename Packet>
+ //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); }
+};
+
+template<typename Scalar, bool is_integer>
+struct scalar_sign_op<Scalar,true, is_integer> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)
EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const
{
@@ -806,7 +993,7 @@ struct scalar_sign_op<Scalar,true> {
if (aa==real_type(0))
return Scalar(0);
aa = real_type(1)/aa;
- return Scalar(real(a)*aa, imag(a)*aa );
+ return Scalar(a.real()*aa, a.imag()*aa );
}
//TODO
//template <typename Packet>
@@ -815,7 +1002,7 @@ struct scalar_sign_op<Scalar,true> {
template<typename Scalar>
struct functor_traits<scalar_sign_op<Scalar> >
{ enum {
- Cost =
+ Cost =
NumTraits<Scalar>::IsComplex
? ( 8*NumTraits<Scalar>::MulCost ) // roughly
: ( 3*NumTraits<Scalar>::AddCost),
@@ -823,6 +1010,120 @@ struct functor_traits<scalar_sign_op<Scalar> >
};
};
+/** \internal
+ * \brief Template functor to compute the logistic function of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::logistic()
+ */
+template <typename T>
+struct scalar_logistic_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_logistic_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const {
+ return packetOp(x);
+ }
+
+ template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(const Packet& x) const {
+ const Packet one = pset1<Packet>(T(1));
+ return pdiv(one, padd(one, pexp(pnegate(x))));
+ }
+};
+
+#ifndef EIGEN_GPU_COMPILE_PHASE
+/** \internal
+ * \brief Template specialization of the logistic function for float.
+ *
+ * Uses a 9/10-degree rational interpolant which
+ * approximates 1/(1+exp(-x)) - 0.5 to within a couple of ulps on the range
+ * [-9, 18]. Below -9 we use the more accurate approximation
+ * 1/(1+exp(-x)) ~= exp(x), and above 18 the logistic function equals 1 to
+ * within one ulp. The shifted logistic is interpolated because it was
+ * easier to make the fit converge.
+ *
+ */
+template <>
+struct scalar_logistic_op<float> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_logistic_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator()(const float& x) const {
+ return packetOp(x);
+ }
+
+ template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(const Packet& _x) const {
+ const Packet cutoff_lower = pset1<Packet>(-9.f);
+ const Packet lt_mask = pcmp_lt<Packet>(_x, cutoff_lower);
+ const bool any_small = predux_any(lt_mask);
+
+ // The upper cut-off is the smallest x for which the rational approximation evaluates to 1.
+ // Choosing this value saves us a few instructions clamping the results at the end.
+#ifdef EIGEN_VECTORIZE_FMA
+ const Packet cutoff_upper = pset1<Packet>(15.7243833541870117f);
+#else
+ const Packet cutoff_upper = pset1<Packet>(15.6437711715698242f);
+#endif
+ const Packet x = pmin(_x, cutoff_upper);
+
+ // The monomial coefficients of the numerator polynomial (odd).
+ const Packet alpha_1 = pset1<Packet>(2.48287947061529e-01f);
+ const Packet alpha_3 = pset1<Packet>(8.51377133304701e-03f);
+ const Packet alpha_5 = pset1<Packet>(6.08574864600143e-05f);
+ const Packet alpha_7 = pset1<Packet>(1.15627324459942e-07f);
+ const Packet alpha_9 = pset1<Packet>(4.37031012579801e-11f);
+
+ // The monomial coefficients of the denominator polynomial (even).
+ const Packet beta_0 = pset1<Packet>(9.93151921023180e-01f);
+ const Packet beta_2 = pset1<Packet>(1.16817656904453e-01f);
+ const Packet beta_4 = pset1<Packet>(1.70198817374094e-03f);
+ const Packet beta_6 = pset1<Packet>(6.29106785017040e-06f);
+ const Packet beta_8 = pset1<Packet>(5.76102136993427e-09f);
+ const Packet beta_10 = pset1<Packet>(6.10247389755681e-13f);
+
+ // Since the polynomials are odd/even, we need x^2.
+ const Packet x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial p.
+ Packet p = pmadd(x2, alpha_9, alpha_7);
+ p = pmadd(x2, p, alpha_5);
+ p = pmadd(x2, p, alpha_3);
+ p = pmadd(x2, p, alpha_1);
+ p = pmul(x, p);
+
+ // Evaluate the denominator polynomial q.
+ Packet q = pmadd(x2, beta_10, beta_8);
+ q = pmadd(x2, q, beta_6);
+ q = pmadd(x2, q, beta_4);
+ q = pmadd(x2, q, beta_2);
+ q = pmadd(x2, q, beta_0);
+ // Divide the numerator by the denominator and shift it up.
+ const Packet logistic = padd(pdiv(p, q), pset1<Packet>(0.5f));
+ if (EIGEN_PREDICT_FALSE(any_small)) {
+ const Packet exponential = pexp(_x);
+ return pselect(lt_mask, exponential, logistic);
+ } else {
+ return logistic;
+ }
+ }
+};
+#endif // #ifndef EIGEN_GPU_COMPILE_PHASE
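For intuition, here is a scalar transliteration of the packet code above, needing only the standard headers: Horner evaluation of the odd numerator p and the even denominator q in x^2, the exp(x) tail below -9, and the non-FMA upper cut-off. It is a sketch, not the code the patch adds:

    #include <algorithm>
    #include <cmath>

    float logistic_approx(float x) {
      if (x < -9.0f) return std::exp(x);      // below -9: 1/(1+exp(-x)) ~= exp(x)
      x = std::min(x, 15.6437711715698242f);  // upper cut-off (non-FMA constant)
      const float x2 = x * x;
      float p = 4.37031012579801e-11f;        // alpha_9
      p = p * x2 + 1.15627324459942e-07f;     // alpha_7
      p = p * x2 + 6.08574864600143e-05f;     // alpha_5
      p = p * x2 + 8.51377133304701e-03f;     // alpha_3
      p = p * x2 + 2.48287947061529e-01f;     // alpha_1
      p *= x;                                 // the numerator is odd in x
      float q = 6.10247389755681e-13f;        // beta_10
      q = q * x2 + 5.76102136993427e-09f;     // beta_8
      q = q * x2 + 6.29106785017040e-06f;     // beta_6
      q = q * x2 + 1.70198817374094e-03f;     // beta_4
      q = q * x2 + 1.16817656904453e-01f;     // beta_2
      q = q * x2 + 9.93151921023180e-01f;     // beta_0
      return p / q + 0.5f;                    // shift the interpolant back up
    }
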
+
+template <typename T>
+struct functor_traits<scalar_logistic_op<T> > {
+ enum {
+ // The cost estimate for float here is for the common case where
+ // all arguments are greater than -9.
+ Cost = scalar_div_cost<T, packet_traits<T>::HasDiv>::value +
+ (internal::is_same<T, float>::value
+ ? NumTraits<T>::AddCost * 15 + NumTraits<T>::MulCost * 11
+ : NumTraits<T>::AddCost * 2 +
+ functor_traits<scalar_exp_op<T> >::Cost),
+ PacketAccess =
+ packet_traits<T>::HasAdd && packet_traits<T>::HasDiv &&
+ (internal::is_same<T, float>::value
+ ? packet_traits<T>::HasMul && packet_traits<T>::HasMax &&
+ packet_traits<T>::HasMin
+ : packet_traits<T>::HasNegate && packet_traits<T>::HasExp)
+ };
+};
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 45230bce5..f35b760c1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -15,7 +15,13 @@ namespace Eigen {
namespace internal {
-template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
+enum GEBPPacketSizeType {
+ GEBPPacketFull = 0,
+ GEBPPacketHalf,
+ GEBPPacketQuarter
+};
+
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false, int Arch=Architecture::Target, int _PacketSize=GEBPPacketFull>
class gebp_traits;
@@ -25,16 +31,42 @@ inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff
return a<=0 ? b : a;
}
+#if defined(EIGEN_DEFAULT_L1_CACHE_SIZE)
+#define EIGEN_SET_DEFAULT_L1_CACHE_SIZE(val) EIGEN_DEFAULT_L1_CACHE_SIZE
+#else
+#define EIGEN_SET_DEFAULT_L1_CACHE_SIZE(val) val
+#endif // defined(EIGEN_DEFAULT_L1_CACHE_SIZE)
+
+#if defined(EIGEN_DEFAULT_L2_CACHE_SIZE)
+#define EIGEN_SET_DEFAULT_L2_CACHE_SIZE(val) EIGEN_DEFAULT_L2_CACHE_SIZE
+#else
+#define EIGEN_SET_DEFAULT_L2_CACHE_SIZE(val) val
+#endif // defined(EIGEN_DEFAULT_L2_CACHE_SIZE)
+
+#if defined(EIGEN_DEFAULT_L3_CACHE_SIZE)
+#define EIGEN_SET_DEFAULT_L3_CACHE_SIZE(val) EIGEN_DEFAULT_L3_CACHE_SIZE
+#else
+#define EIGEN_SET_DEFAULT_L3_CACHE_SIZE(val) val
+#endif // defined(EIGEN_DEFAULT_L3_CACHE_SIZE)
+
#if EIGEN_ARCH_i386_OR_x86_64
-const std::ptrdiff_t defaultL1CacheSize = 32*1024;
-const std::ptrdiff_t defaultL2CacheSize = 256*1024;
-const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
+const std::ptrdiff_t defaultL1CacheSize = EIGEN_SET_DEFAULT_L1_CACHE_SIZE(32*1024);
+const std::ptrdiff_t defaultL2CacheSize = EIGEN_SET_DEFAULT_L2_CACHE_SIZE(256*1024);
+const std::ptrdiff_t defaultL3CacheSize = EIGEN_SET_DEFAULT_L3_CACHE_SIZE(2*1024*1024);
+#elif EIGEN_ARCH_PPC
+const std::ptrdiff_t defaultL1CacheSize = EIGEN_SET_DEFAULT_L1_CACHE_SIZE(64*1024);
+const std::ptrdiff_t defaultL2CacheSize = EIGEN_SET_DEFAULT_L2_CACHE_SIZE(512*1024);
+const std::ptrdiff_t defaultL3CacheSize = EIGEN_SET_DEFAULT_L3_CACHE_SIZE(4*1024*1024);
#else
-const std::ptrdiff_t defaultL1CacheSize = 16*1024;
-const std::ptrdiff_t defaultL2CacheSize = 512*1024;
-const std::ptrdiff_t defaultL3CacheSize = 512*1024;
+const std::ptrdiff_t defaultL1CacheSize = EIGEN_SET_DEFAULT_L1_CACHE_SIZE(16*1024);
+const std::ptrdiff_t defaultL2CacheSize = EIGEN_SET_DEFAULT_L2_CACHE_SIZE(512*1024);
+const std::ptrdiff_t defaultL3CacheSize = EIGEN_SET_DEFAULT_L3_CACHE_SIZE(512*1024);
#endif
+#undef EIGEN_SET_DEFAULT_L1_CACHE_SIZE
+#undef EIGEN_SET_DEFAULT_L2_CACHE_SIZE
+#undef EIGEN_SET_DEFAULT_L3_CACHE_SIZE
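With the EIGEN_SET_DEFAULT_*_CACHE_SIZE helpers, the per-target defaults can now be overridden at build time by defining the corresponding macros before any Eigen header is included. The values below are illustrative only:

    // Hypothetical override of the built-in defaults (sizes in bytes):
    #define EIGEN_DEFAULT_L1_CACHE_SIZE (64*1024)
    #define EIGEN_DEFAULT_L2_CACHE_SIZE (1024*1024)
    #define EIGEN_DEFAULT_L3_CACHE_SIZE (32*1024*1024)
    #include <Eigen/Core>
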
+
/** \internal */
struct CacheSizes {
CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
@@ -50,7 +82,6 @@ struct CacheSizes {
std::ptrdiff_t m_l3;
};
-
/** \internal */
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
@@ -101,6 +132,16 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
// at the register level. This small horizontal panel has to stay within L1 cache.
std::ptrdiff_t l1, l2, l3;
manage_caching_sizes(GetAction, &l1, &l2, &l3);
+ #ifdef EIGEN_VECTORIZE_AVX512
+ // We still need to find a rationale for this, but without the adjustment,
+ // performance with AVX512 is pretty bad, roughly 20% slower.
+ // One reason is that with increasing packet size, the blocking size k
+ // has to become pretty small if we want one lhs panel to fit within L1.
+ // For instance, with the 3pX4 kernel and double, the lhs+rhs panels take
+ // k*(3*64 + 4*8) bytes; with l1=32kBytes and k%8==0, we get k=144.
+ // This is quite small for a good reuse of the accumulation registers.
+ l1 *= 4;
+ #endif
if (num_threads > 1) {
typedef typename Traits::ResScalar ResScalar;
@@ -115,7 +156,8 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
// registers. However once the latency is hidden there is no point in
// increasing the value of k, so we'll cap it at 320 (value determined
// experimentally).
- const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320);
+ // To prevent k from vanishing, we make k_cache at least as big as kr.
+ const Index k_cache = numext::maxi<Index>(kr, (numext::mini<Index>)((l1-ksub)/kdiv, 320));
if (k_cache < k) {
k = k_cache - (k_cache % kr);
eigen_internal_assert(k > 0);
@@ -307,35 +349,60 @@ inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_
computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
}
-#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
- #define CJMADD(CJ,A,B,C,T) C = CJ.pmadd(A,B,C);
-#else
-
- // FIXME (a bit overkill maybe ?)
-
- template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
- EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
- {
- c = cj.pmadd(a,b,c);
- }
- };
-
- template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
- EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
- {
- t = b; t = cj.pmul(a,t); c = padd(c,t);
- }
- };
+template <typename RhsPacket, typename RhsPacketx4, int registers_taken>
+struct RhsPanelHelper {
+ private:
+ static const int remaining_registers = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS - registers_taken;
+ public:
+ typedef typename conditional<remaining_registers>=4, RhsPacketx4, RhsPacket>::type type;
+};
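RhsPanelHelper selects the four-lane rhs panel only when the register budget allows it; the gebp_kernel below instantiates it with registers_taken=15 (see RhsPanel15). A standalone analogue with an explicit register count instead of EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS; all names are hypothetical:

    #include <type_traits>

    template <typename Narrow, typename Wide, int num_regs, int regs_taken>
    struct panel_helper {
      typedef typename std::conditional<((num_regs - regs_taken) >= 4),
                                        Wide, Narrow>::type type;
    };

    // A 16-register target with 15 taken falls back to the narrow type;
    // a 32-register target can afford the wide x4 panel.
    static_assert(std::is_same<panel_helper<int, long, 16, 15>::type, int >::value, "");
    static_assert(std::is_same<panel_helper<int, long, 32, 15>::type, long>::value, "");
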
- template<typename CJ, typename A, typename B, typename C, typename T>
- EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
- {
- gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
- }
+template <typename Packet>
+struct QuadPacket
+{
+ Packet B_0, B1, B2, B3;
+ const Packet& get(const FixedInt<0>&) const { return B_0; }
+ const Packet& get(const FixedInt<1>&) const { return B1; }
+ const Packet& get(const FixedInt<2>&) const { return B2; }
+ const Packet& get(const FixedInt<3>&) const { return B3; }
+};
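The FixedInt-tagged get() lets the madd() overloads further down select one broadcast lane at compile time, with no runtime branch. A minimal sketch, assuming Eigen's internal FixedInt tag type is default-constructible and that this sits in namespace Eigen::internal:

    template <typename Packet>
    const Packet& third_lane(const QuadPacket<Packet>& qp) {
      return qp.get(FixedInt<2>());  // statically resolves to qp.B2
    }
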
- #define CJMADD(CJ,A,B,C,T) gebp_madd(CJ,A,B,C,T);
-// #define CJMADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T);
-#endif
+template <int N, typename T1, typename T2, typename T3>
+struct packet_conditional { typedef T3 type; };
+
+template <typename T1, typename T2, typename T3>
+struct packet_conditional<GEBPPacketFull, T1, T2, T3> { typedef T1 type; };
+
+template <typename T1, typename T2, typename T3>
+struct packet_conditional<GEBPPacketHalf, T1, T2, T3> { typedef T2 type; };
+
+#define PACKET_DECL_COND_PREFIX(prefix, name, packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<name ## Scalar>::type, \
+ typename packet_traits<name ## Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<name ## Scalar>::half>::half>::type \
+ prefix ## name ## Packet
+
+#define PACKET_DECL_COND(name, packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<name ## Scalar>::type, \
+ typename packet_traits<name ## Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<name ## Scalar>::half>::half>::type \
+ name ## Packet
+
+#define PACKET_DECL_COND_SCALAR_PREFIX(prefix, packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<Scalar>::type, \
+ typename packet_traits<Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<Scalar>::half>::half>::type \
+ prefix ## ScalarPacket
+
+#define PACKET_DECL_COND_SCALAR(packet_size) \
+ typedef typename packet_conditional<packet_size, \
+ typename packet_traits<Scalar>::type, \
+ typename packet_traits<Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<Scalar>::half>::half>::type \
+ ScalarPacket
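Each macro expands to a packet_conditional typedef that picks the full, half, or quarter packet type for the requested packet size. A standalone sketch of the three-way selection with plain stand-in types, assuming the Eigen::internal namespace context:

    #include <type_traits>

    // GEBPPacketFull -> T1, GEBPPacketHalf -> T2, anything else -> T3.
    static_assert(std::is_same<packet_conditional<GEBPPacketFull,    int, short, char>::type, int  >::value, "");
    static_assert(std::is_same<packet_conditional<GEBPPacketHalf,    int, short, char>::type, short>::value, "");
    static_assert(std::is_same<packet_conditional<GEBPPacketQuarter, int, short, char>::type, char >::value, "");
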
/* Vectorization logic
* real*real: unpack rhs to constant packets, ...
@@ -347,7 +414,7 @@ inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_
* cplx*real : unpack rhs to constant packets, ...
* real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
*/
-template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs, int Arch, int _PacketSize>
class gebp_traits
{
public:
@@ -355,13 +422,17 @@ public:
typedef _RhsScalar RhsScalar;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+
enum {
ConjLhs = _ConjLhs,
ConjRhs = _ConjRhs,
- Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+ Vectorizable = unpacket_traits<_LhsPacket>::vectorizable && unpacket_traits<_RhsPacket>::vectorizable,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
@@ -370,10 +441,12 @@ public:
// register block size along the M direction (currently, this one cannot be modified)
default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
-#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
- // we assume 16 registers
+#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX) \
+ && ((!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1914))
+ // we assume 16 registers or more
// See bug 992: if the scalar type is not vectorizable but EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
// then using 3*LhsPacketSize triggers non-implemented paths in syrk.
+ // Bug 1515: MSVC prior to v19.14 leads to register spilling.
mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
mr = default_mr,
@@ -383,37 +456,41 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<LhsScalar>::type _LhsPacket;
- typedef typename packet_traits<RhsScalar>::type _RhsPacket;
- typedef typename packet_traits<ResScalar>::type _ResPacket;
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef LhsPacket LhsPacket4Packing;
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
typedef ResPacket AccPacket;
EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
{
p = pset1<ResPacket>(ResScalar(0));
}
-
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
- {
- pbroadcast4(b, b0, b1, b2, b3);
- }
-
-// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
-// {
-// pbroadcast2(b, b0, b1);
-// }
-
+
template<typename RhsPacketType>
EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
dest = pset1<RhsPacketType>(*b);
}
-
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ pbroadcast4(b, dest.B_0, dest.B1, dest.B2, dest.B3);
+ }
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketType& dest) const
+ {
+ loadRhs(b, dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {
+ }
+
EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
{
dest = ploadquad<RhsPacket>(b);
@@ -431,8 +508,8 @@ public:
dest = ploadu<LhsPacketType>(a);
}
- template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
- EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
+ template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const LaneIdType&) const
{
conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj;
// It would be a lot cleaner to call pmadd all the time. Unfortunately if we
@@ -447,6 +524,12 @@ public:
#endif
}
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+
EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
{
r = pmadd(c,alpha,r);
@@ -460,21 +543,25 @@ public:
};
-template<typename RealScalar, bool _ConjLhs>
-class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
+template<typename RealScalar, bool _ConjLhs, int Arch, int _PacketSize>
+class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false, Arch, _PacketSize>
{
public:
typedef std::complex<RealScalar> LhsScalar;
typedef RealScalar RhsScalar;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+
enum {
ConjLhs = _ConjLhs,
ConjRhs = false,
- Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+ Vectorizable = unpacket_traits<_LhsPacket>::vectorizable && unpacket_traits<_RhsPacket>::vectorizable,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
nr = 4,
@@ -489,13 +576,12 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<LhsScalar>::type _LhsPacket;
- typedef typename packet_traits<RhsScalar>::type _RhsPacket;
- typedef typename packet_traits<ResScalar>::type _ResPacket;
-
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef LhsPacket LhsPacket4Packing;
+
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
typedef ResPacket AccPacket;
@@ -504,42 +590,64 @@ public:
p = pset1<ResPacket>(ResScalar(0));
}
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ dest = pset1<RhsPacketType>(*b);
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ pbroadcast4(b, dest.B_0, dest.B1, dest.B2, dest.B3);
}
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketType& dest) const
+ {
+ loadRhs(b, dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ loadRhsQuad_impl(b,dest, typename conditional<RhsPacketSize==16,true_type,false_type>::type());
}
- EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ EIGEN_STRONG_INLINE void loadRhsQuad_impl(const RhsScalar* b, RhsPacket& dest, const true_type&) const
{
- dest = pload<LhsPacket>(a);
+ // FIXME we can do better!
+ // what we want here is a ploadheight
+ RhsScalar tmp[4] = {b[0],b[0],b[1],b[1]};
+ dest = ploadquad<RhsPacket>(tmp);
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ EIGEN_STRONG_INLINE void loadRhsQuad_impl(const RhsScalar* b, RhsPacket& dest, const false_type&) const
{
- dest = ploadu<LhsPacket>(a);
+ eigen_internal_assert(RhsPacketSize<=8);
+ dest = pset1<RhsPacket>(*b);
}
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
{
- pbroadcast4(b, b0, b1, b2, b3);
+ dest = pload<LhsPacket>(a);
}
-
-// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
-// {
-// pbroadcast2(b, b0, b1);
-// }
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
+ {
+ dest = ploadu<LhsPacketType>(a);
+ }
+
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const LaneIdType&) const
{
madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
}
- EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void madd_impl(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const true_type&) const
{
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
EIGEN_UNUSED_VARIABLE(tmp);
@@ -554,13 +662,20 @@ public:
c += a * b;
}
- EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
{
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+
+ template <typename ResPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void acc(const AccPacketType& c, const ResPacketType& alpha, ResPacketType& r) const
+ {
+ conj_helper<ResPacketType,ResPacketType,ConjLhs,false> cj;
r = cj.pmadd(c,alpha,r);
}
protected:
- conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};
template<typename Packet>
@@ -579,13 +694,57 @@ DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Pack
return res;
}
+// Note that for DoublePacket<RealPacket> the "4" in "downto4"
+// refers to the number of complex values, so it means "8"
+// in terms of real coefficients.
+
template<typename Packet>
-const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a)
+const DoublePacket<Packet>&
+predux_half_dowto4(const DoublePacket<Packet> &a,
+ typename enable_if<unpacket_traits<Packet>::size<=8>::type* = 0)
{
return a;
}
-template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
+template<typename Packet>
+DoublePacket<typename unpacket_traits<Packet>::half>
+predux_half_dowto4(const DoublePacket<Packet> &a,
+ typename enable_if<unpacket_traits<Packet>::size==16>::type* = 0)
+{
+ // yes, that's pretty hackish :(
+ DoublePacket<typename unpacket_traits<Packet>::half> res;
+ typedef std::complex<typename unpacket_traits<Packet>::type> Cplx;
+ typedef typename packet_traits<Cplx>::type CplxPacket;
+ res.first = predux_half_dowto4(CplxPacket(a.first)).v;
+ res.second = predux_half_dowto4(CplxPacket(a.second)).v;
+ return res;
+}
+
+// same here, "quad" actually means "8" in terms of real coefficients
+template<typename Scalar, typename RealPacket>
+void loadQuadToDoublePacket(const Scalar* b, DoublePacket<RealPacket>& dest,
+ typename enable_if<unpacket_traits<RealPacket>::size<=8>::type* = 0)
+{
+ dest.first = pset1<RealPacket>(numext::real(*b));
+ dest.second = pset1<RealPacket>(numext::imag(*b));
+}
+
+template<typename Scalar, typename RealPacket>
+void loadQuadToDoublePacket(const Scalar* b, DoublePacket<RealPacket>& dest,
+ typename enable_if<unpacket_traits<RealPacket>::size==16>::type* = 0)
+{
+ // yes, that's pretty hackish too :(
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ RealScalar r[4] = {numext::real(b[0]), numext::real(b[0]), numext::real(b[1]), numext::real(b[1])};
+ RealScalar i[4] = {numext::imag(b[0]), numext::imag(b[0]), numext::imag(b[1]), numext::imag(b[1])};
+ dest.first = ploadquad<RealPacket>(r);
+ dest.second = ploadquad<RealPacket>(i);
+}
+
+
+template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > {
+ typedef DoublePacket<typename unpacket_traits<Packet>::half> half;
+};
// template<typename Packet>
// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
// {
@@ -595,8 +754,8 @@ template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typede
// return res;
// }
-template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
-class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
+template<typename RealScalar, bool _ConjLhs, bool _ConjRhs, int Arch, int _PacketSize>
+class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs, Arch, _PacketSize >
{
public:
typedef std::complex<RealScalar> Scalar;
@@ -604,15 +763,21 @@ public:
typedef std::complex<RealScalar> RhsScalar;
typedef std::complex<RealScalar> ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+ PACKET_DECL_COND(Real, _PacketSize);
+ PACKET_DECL_COND_SCALAR(_PacketSize);
+
enum {
ConjLhs = _ConjLhs,
ConjRhs = _ConjRhs,
- Vectorizable = packet_traits<RealScalar>::Vectorizable
- && packet_traits<Scalar>::Vectorizable,
- RealPacketSize = Vectorizable ? packet_traits<RealScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ Vectorizable = unpacket_traits<RealPacket>::vectorizable
+ && unpacket_traits<ScalarPacket>::vectorizable,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<RhsScalar>::size : 1,
+ RealPacketSize = Vectorizable ? unpacket_traits<RealPacket>::size : 1,
// FIXME: should depend on NumberOfRegisters
nr = 4,
@@ -622,14 +787,16 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<RealScalar>::type RealPacket;
- typedef typename packet_traits<Scalar>::type ScalarPacket;
- typedef DoublePacket<RealPacket> DoublePacketType;
+ typedef DoublePacket<RealPacket> DoublePacketType;
+ typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type LhsPacket4Packing;
typedef typename conditional<Vectorizable,RealPacket, Scalar>::type LhsPacket;
typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;
+
+ // this actually holds 8 packets!
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
@@ -640,51 +807,49 @@ public:
}
// Scalar path
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ScalarPacket& dest) const
{
- dest = pset1<ResPacket>(*b);
+ dest = pset1<ScalarPacket>(*b);
}
// Vectorized path
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
+ template<typename RealPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket<RealPacketType>& dest) const
{
- dest.first = pset1<RealPacket>(real(*b));
- dest.second = pset1<RealPacket>(imag(*b));
+ dest.first = pset1<RealPacketType>(numext::real(*b));
+ dest.second = pset1<RealPacketType>(numext::imag(*b));
}
-
- EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
{
- loadRhs(b,dest);
+ loadRhs(b, dest.B_0);
+ loadRhs(b + 1, dest.B1);
+ loadRhs(b + 2, dest.B2);
+ loadRhs(b + 3, dest.B3);
}
- EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
+
+ // Scalar path
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, ScalarPacket& dest) const
{
- eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
- loadRhs(b,dest);
+ loadRhs(b, dest);
}
-
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+
+ // Vectorized path
+ template<typename RealPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, DoublePacket<RealPacketType>& dest) const
{
- // FIXME not sure that's the best way to implement it!
- loadRhs(b+0, b0);
- loadRhs(b+1, b1);
- loadRhs(b+2, b2);
- loadRhs(b+3, b3);
+ loadRhs(b, dest);
}
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const {}
- // Vectorized path
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
{
- // FIXME not sure that's the best way to implement it!
- loadRhs(b+0, b0);
- loadRhs(b+1, b1);
+ loadRhs(b,dest);
}
-
- // Scalar path
- EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
{
- // FIXME not sure that's the best way to implement it!
- loadRhs(b+0, b0);
- loadRhs(b+1, b1);
+ loadQuadToDoublePacket(b,dest);
}
// nothing special here
@@ -693,47 +858,59 @@ public:
dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
{
- dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
+ dest = ploadu<LhsPacketType>((const typename unpacket_traits<LhsPacketType>::type*)(a));
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
+ template<typename LhsPacketType, typename RhsPacketType, typename ResPacketType, typename TmpType, typename LaneIdType>
+ EIGEN_STRONG_INLINE
+ typename enable_if<!is_same<RhsPacketType,RhsPacketx4>::value>::type
+ madd(const LhsPacketType& a, const RhsPacketType& b, DoublePacket<ResPacketType>& c, TmpType& /*tmp*/, const LaneIdType&) const
{
c.first = padd(pmul(a,b.first), c.first);
c.second = padd(pmul(a,b.second),c.second);
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
+ template<typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/, const LaneIdType&) const
{
c = cj.pmadd(a,b,c);
}
+
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
- EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
+ template<typename RealPacketType, typename ResPacketType>
+ EIGEN_STRONG_INLINE void acc(const DoublePacket<RealPacketType>& c, const ResPacketType& alpha, ResPacketType& r) const
{
// assemble c
- ResPacket tmp;
+ ResPacketType tmp;
if((!ConjLhs)&&(!ConjRhs))
{
- tmp = pcplxflip(pconj(ResPacket(c.second)));
- tmp = padd(ResPacket(c.first),tmp);
+ tmp = pcplxflip(pconj(ResPacketType(c.second)));
+ tmp = padd(ResPacketType(c.first),tmp);
}
else if((!ConjLhs)&&(ConjRhs))
{
- tmp = pconj(pcplxflip(ResPacket(c.second)));
- tmp = padd(ResPacket(c.first),tmp);
+ tmp = pconj(pcplxflip(ResPacketType(c.second)));
+ tmp = padd(ResPacketType(c.first),tmp);
}
else if((ConjLhs)&&(!ConjRhs))
{
- tmp = pcplxflip(ResPacket(c.second));
- tmp = padd(pconj(ResPacket(c.first)),tmp);
+ tmp = pcplxflip(ResPacketType(c.second));
+ tmp = padd(pconj(ResPacketType(c.first)),tmp);
}
else if((ConjLhs)&&(ConjRhs))
{
- tmp = pcplxflip(ResPacket(c.second));
- tmp = psub(pconj(ResPacket(c.first)),tmp);
+ tmp = pcplxflip(ResPacketType(c.second));
+ tmp = psub(pconj(ResPacketType(c.first)),tmp);
}
r = pmadd(tmp,alpha,r);
@@ -743,8 +920,8 @@ protected:
conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};
-template<typename RealScalar, bool _ConjRhs>
-class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
+template<typename RealScalar, bool _ConjRhs, int Arch, int _PacketSize>
+class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs, Arch, _PacketSize >
{
public:
typedef std::complex<RealScalar> Scalar;
@@ -752,14 +929,25 @@ public:
typedef Scalar RhsScalar;
typedef Scalar ResScalar;
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Real, _PacketSize);
+ PACKET_DECL_COND_SCALAR_PREFIX(_, _PacketSize);
+
+#undef PACKET_DECL_COND_SCALAR_PREFIX
+#undef PACKET_DECL_COND_PREFIX
+#undef PACKET_DECL_COND_SCALAR
+#undef PACKET_DECL_COND
+
enum {
ConjLhs = false,
ConjRhs = _ConjRhs,
- Vectorizable = packet_traits<RealScalar>::Vectorizable
- && packet_traits<Scalar>::Vectorizable,
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+ Vectorizable = unpacket_traits<_RealPacket>::vectorizable
+ && unpacket_traits<_ScalarPacket>::vectorizable,
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1,
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
// FIXME: should depend on NumberOfRegisters
@@ -770,14 +958,11 @@ public:
RhsProgress = 1
};
- typedef typename packet_traits<LhsScalar>::type _LhsPacket;
- typedef typename packet_traits<RhsScalar>::type _RhsPacket;
- typedef typename packet_traits<ResScalar>::type _ResPacket;
-
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
-
+ typedef LhsPacket LhsPacket4Packing;
+ typedef QuadPacket<RhsPacket> RhsPacketx4;
typedef ResPacket AccPacket;
EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
@@ -785,22 +970,25 @@ public:
p = pset1<ResPacket>(ResScalar(0));
}
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ dest = pset1<RhsPacketType>(*b);
}
-
- void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
{
- pbroadcast4(b, b0, b1, b2, b3);
+ pbroadcast4(b, dest.B_0, dest.B1, dest.B2, dest.B3);
}
-
-// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
-// {
-// // FIXME not sure that's the best way to implement it!
-// b0 = pload1<RhsPacket>(b+0);
-// b1 = pload1<RhsPacket>(b+1);
-// }
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacketType& dest) const
+ {
+ loadRhs(b, dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
{
@@ -809,21 +997,23 @@ public:
EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
{
- eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
- loadRhs(b,dest);
+ dest = ploadquad<RhsPacket>(b);
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
{
- dest = ploaddup<LhsPacket>(a);
+ dest = ploaddup<LhsPacketType>(a);
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const LaneIdType&) const
{
madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
}
- EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+ template <typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void madd_impl(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, RhsPacketType& tmp, const true_type&) const
{
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
EIGEN_UNUSED_VARIABLE(tmp);
@@ -839,16 +1029,24 @@ public:
c += a * b;
}
- EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ template<typename LhsPacketType, typename AccPacketType, typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketx4& b, AccPacketType& c, RhsPacket& tmp, const LaneIdType& lane) const
+ {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+
+ template <typename ResPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void acc(const AccPacketType& c, const ResPacketType& alpha, ResPacketType& r) const
{
+ conj_helper<ResPacketType,ResPacketType,false,ConjRhs> cj;
r = cj.pmadd(alpha,c,r);
}
protected:
- conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
+
};
-/* optimized GEneral packed Block * packed Panel product kernel
+/* optimized General packed Block * packed Panel product kernel
*
* Mixing type logic: C += A * B
* | A | B | comments
@@ -858,26 +1056,47 @@ protected:
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
- typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target> Traits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target,GEBPPacketHalf> HalfTraits;
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target,GEBPPacketQuarter> QuarterTraits;
+
typedef typename Traits::ResScalar ResScalar;
typedef typename Traits::LhsPacket LhsPacket;
typedef typename Traits::RhsPacket RhsPacket;
typedef typename Traits::ResPacket ResPacket;
typedef typename Traits::AccPacket AccPacket;
+ typedef typename Traits::RhsPacketx4 RhsPacketx4;
+
+ typedef typename RhsPanelHelper<RhsPacket, RhsPacketx4, 15>::type RhsPanel15;
+
+ typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target> SwappedTraits;
- typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
typedef typename SwappedTraits::ResScalar SResScalar;
typedef typename SwappedTraits::LhsPacket SLhsPacket;
typedef typename SwappedTraits::RhsPacket SRhsPacket;
typedef typename SwappedTraits::ResPacket SResPacket;
typedef typename SwappedTraits::AccPacket SAccPacket;
+ typedef typename HalfTraits::LhsPacket LhsPacketHalf;
+ typedef typename HalfTraits::RhsPacket RhsPacketHalf;
+ typedef typename HalfTraits::ResPacket ResPacketHalf;
+ typedef typename HalfTraits::AccPacket AccPacketHalf;
+
+ typedef typename QuarterTraits::LhsPacket LhsPacketQuarter;
+ typedef typename QuarterTraits::RhsPacket RhsPacketQuarter;
+ typedef typename QuarterTraits::ResPacket ResPacketQuarter;
+ typedef typename QuarterTraits::AccPacket AccPacketQuarter;
+
typedef typename DataMapper::LinearMapper LinearMapper;
enum {
Vectorizable = Traits::Vectorizable,
LhsProgress = Traits::LhsProgress,
+ LhsProgressHalf = HalfTraits::LhsProgress,
+ LhsProgressQuarter = QuarterTraits::LhsProgress,
RhsProgress = Traits::RhsProgress,
+ RhsProgressHalf = HalfTraits::RhsProgress,
+ RhsProgressQuarter = QuarterTraits::RhsProgress,
ResPacketSize = Traits::ResPacketSize
};
@@ -887,6 +1106,299 @@ struct gebp_kernel
Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
};
+template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs,
+int SwappedLhsProgress = gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target>::LhsProgress>
+struct last_row_process_16_packets
+{
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target> Traits;
+ typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target> SwappedTraits;
+
+ typedef typename Traits::ResScalar ResScalar;
+ typedef typename SwappedTraits::LhsPacket SLhsPacket;
+ typedef typename SwappedTraits::RhsPacket SRhsPacket;
+ typedef typename SwappedTraits::ResPacket SResPacket;
+ typedef typename SwappedTraits::AccPacket SAccPacket;
+
+ EIGEN_STRONG_INLINE void operator()(const DataMapper& res, SwappedTraits &straits, const LhsScalar* blA,
+ const RhsScalar* blB, Index depth, const Index endk, Index i, Index j2,
+ ResScalar alpha, SAccPacket &C0)
+ {
+ EIGEN_UNUSED_VARIABLE(res);
+ EIGEN_UNUSED_VARIABLE(straits);
+ EIGEN_UNUSED_VARIABLE(blA);
+ EIGEN_UNUSED_VARIABLE(blB);
+ EIGEN_UNUSED_VARIABLE(depth);
+ EIGEN_UNUSED_VARIABLE(endk);
+ EIGEN_UNUSED_VARIABLE(i);
+ EIGEN_UNUSED_VARIABLE(j2);
+ EIGEN_UNUSED_VARIABLE(alpha);
+ EIGEN_UNUSED_VARIABLE(C0);
+ }
+};
+
+
+template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct last_row_process_16_packets<LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs, 16> {
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs,Architecture::Target> Traits;
+ typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs,Architecture::Target> SwappedTraits;
+
+ typedef typename Traits::ResScalar ResScalar;
+ typedef typename SwappedTraits::LhsPacket SLhsPacket;
+ typedef typename SwappedTraits::RhsPacket SRhsPacket;
+ typedef typename SwappedTraits::ResPacket SResPacket;
+ typedef typename SwappedTraits::AccPacket SAccPacket;
+
+ EIGEN_STRONG_INLINE void operator()(const DataMapper& res, SwappedTraits &straits, const LhsScalar* blA,
+ const RhsScalar* blB, Index depth, const Index endk, Index i, Index j2,
+ ResScalar alpha, SAccPacket &C0)
+ {
+ typedef typename unpacket_traits<typename unpacket_traits<SResPacket>::half>::half SResPacketQuarter;
+ typedef typename unpacket_traits<typename unpacket_traits<SLhsPacket>::half>::half SLhsPacketQuarter;
+ typedef typename unpacket_traits<typename unpacket_traits<SRhsPacket>::half>::half SRhsPacketQuarter;
+ typedef typename unpacket_traits<typename unpacket_traits<SAccPacket>::half>::half SAccPacketQuarter;
+
+ SResPacketQuarter R = res.template gatherPacket<SResPacketQuarter>(i, j2);
+ SResPacketQuarter alphav = pset1<SResPacketQuarter>(alpha);
+
+ if (depth - endk > 0)
+ {
+ // We have to handle the last row(s) of the rhs, which
+ // correspond to a half-packet
+ SAccPacketQuarter c0 = predux_half_dowto4(predux_half_dowto4(C0));
+
+ for (Index kk = endk; kk < depth; kk++)
+ {
+ SLhsPacketQuarter a0;
+ SRhsPacketQuarter b0;
+ straits.loadLhsUnaligned(blB, a0);
+ straits.loadRhs(blA, b0);
+ straits.madd(a0,b0,c0,b0, fix<0>);
+ blB += SwappedTraits::LhsProgress/4;
+ blA += 1;
+ }
+ straits.acc(c0, alphav, R);
+ }
+ else
+ {
+ straits.acc(predux_half_dowto4(predux_half_dowto4(C0)), alphav, R);
+ }
+ res.scatterPacket(i, j2, R);
+ }
+};
+
+template<int nr, Index LhsProgress, Index RhsProgress, typename LhsScalar, typename RhsScalar, typename ResScalar, typename AccPacket, typename LhsPacket, typename RhsPacket, typename ResPacket, typename GEBPTraits, typename LinearMapper, typename DataMapper>
+struct lhs_process_one_packet
+{
+ typedef typename GEBPTraits::RhsPacketx4 RhsPacketx4;
+
+ EIGEN_STRONG_INLINE void peeled_kc_onestep(Index K, const LhsScalar* blA, const RhsScalar* blB, GEBPTraits traits, LhsPacket *A0, RhsPacketx4 *rhs_panel, RhsPacket *T0, AccPacket *C0, AccPacket *C1, AccPacket *C2, AccPacket *C3)
+ {
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1X4");
+ EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!");
+ traits.loadLhs(&blA[(0+1*K)*LhsProgress], *A0);
+ traits.loadRhs(&blB[(0+4*K)*RhsProgress], *rhs_panel);
+ traits.madd(*A0, *rhs_panel, *C0, *T0, fix<0>);
+ traits.madd(*A0, *rhs_panel, *C1, *T0, fix<1>);
+ traits.madd(*A0, *rhs_panel, *C2, *T0, fix<2>);
+ traits.madd(*A0, *rhs_panel, *C3, *T0, fix<3>);
+ #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
+ __asm__ ("" : "+x,m" (*A0));
+ #endif
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 1X4");
+ }
+
+ EIGEN_STRONG_INLINE void operator()(
+ const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, ResScalar alpha,
+ Index peelStart, Index peelEnd, Index strideA, Index strideB, Index offsetA, Index offsetB,
+ int prefetch_res_offset, Index peeled_kc, Index pk, Index cols, Index depth, Index packet_cols4)
+ {
+ GEBPTraits traits;
+
+ // loops on each largest micro horizontal panel of lhs
+ // (LhsProgress x depth)
+ for(Index i=peelStart; i<peelEnd; i+=LhsProgress)
+ {
+ // loops on each largest micro vertical panel of rhs (depth * nr)
+ for(Index j2=0; j2<packet_cols4; j2+=nr)
+ {
+ // We select a LhsProgress x nr micro block of res
+ // which is entirely stored into 1 x nr registers.
+
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ traits.initAcc(C2);
+ traits.initAcc(C3);
+ // To improve instruction pipelining, let's double the accumulation registers:
+ // even k will accumulate in C*, while odd k will accumulate in D*.
+        // This trick is crucial to get good performance with FMA; otherwise it is
+        // actually faster to perform separate MUL+ADD thanks to the naturally
+        // better instruction-level parallelism.
+ AccPacket D0, D1, D2, D3;
+ traits.initAcc(D0);
+ traits.initAcc(D1);
+ traits.initAcc(D2);
+ traits.initAcc(D3);
+
+ LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
+ LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
+ LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
+ LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
+
+ r0.prefetch(prefetch_res_offset);
+ r1.prefetch(prefetch_res_offset);
+ r2.prefetch(prefetch_res_offset);
+ r3.prefetch(prefetch_res_offset);
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+ prefetch(&blB[0]);
+ LhsPacket A0, A1;
+
+ for(Index k=0; k<peeled_kc; k+=pk)
+ {
+ EIGEN_ASM_COMMENT("begin gebp micro kernel 1/half/quarterX4");
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
+
+ internal::prefetch(blB+(48+0));
+ peeled_kc_onestep(0, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(1, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+ peeled_kc_onestep(2, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(3, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+ internal::prefetch(blB+(48+16));
+ peeled_kc_onestep(4, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(5, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+ peeled_kc_onestep(6, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ peeled_kc_onestep(7, blA, blB, traits, &A1, &rhs_panel, &T0, &D0, &D1, &D2, &D3);
+
+ blB += pk*4*RhsProgress;
+ blA += pk*LhsProgress;
+
+ EIGEN_ASM_COMMENT("end gebp micro kernel 1/half/quarterX4");
+ }
+ C0 = padd(C0,D0);
+ C1 = padd(C1,D1);
+ C2 = padd(C2,D2);
+ C3 = padd(C3,D3);
+
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
+ peeled_kc_onestep(0, blA, blB, traits, &A0, &rhs_panel, &T0, &C0, &C1, &C2, &C3);
+ blB += 4*RhsProgress;
+ blA += LhsProgress;
+ }
+
+ ResPacket R0, R1;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = r0.template loadPacket<ResPacket>(0);
+ R1 = r1.template loadPacket<ResPacket>(0);
+ traits.acc(C0, alphav, R0);
+ traits.acc(C1, alphav, R1);
+ r0.storePacket(0, R0);
+ r1.storePacket(0, R1);
+
+ R0 = r2.template loadPacket<ResPacket>(0);
+ R1 = r3.template loadPacket<ResPacket>(0);
+ traits.acc(C2, alphav, R0);
+ traits.acc(C3, alphav, R1);
+ r2.storePacket(0, R0);
+ r3.storePacket(0, R1);
+ }
+
+ // Deal with remaining columns of the rhs
+ for(Index j2=packet_cols4; j2<cols; j2++)
+ {
+ // One column at a time
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0;
+ traits.initAcc(C0);
+
+ LinearMapper r0 = res.getLinearMapper(i, j2);
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ LhsPacket A0;
+
+ for(Index k= 0; k<peeled_kc; k+=pk)
+ {
+ EIGEN_ASM_COMMENT("begin gebp micro kernel 1/half/quarterX1");
+ RhsPacket B_0;
+
+#define EIGEN_GEBGP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1/half/quarterX1"); \
+ EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
+ /* FIXME: why unaligned???? */ \
+ traits.loadLhsUnaligned(&blA[(0+1*K)*LhsProgress], A0); \
+ traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0, fix<0>); \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 1/half/quarterX1"); \
+ } while(false);
+
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*RhsProgress;
+ blA += pk*LhsProgress;
+
+ EIGEN_ASM_COMMENT("end gebp micro kernel 1/half/quarterX1");
+ }
+
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacket B_0;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += RhsProgress;
+ blA += LhsProgress;
+ }
+#undef EIGEN_GEBGP_ONESTEP
+ ResPacket R0;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+ R0 = r0.template loadPacket<ResPacket>(0);
+ traits.acc(C0, alphav, R0);
+ r0.storePacket(0, R0);
+ }
+ }
+ }
+};
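
// A scalar sketch of the dual-accumulator trick used above (C* for
// even k, D* for odd k): splitting the dependency chain lets
// consecutive FMAs overlap instead of serializing on one register.
// dot_two_acc is a hypothetical stand-in for the packetized kernel.
inline float dot_two_acc(const float* a, const float* b, int n) {
  float c = 0.f, d = 0.f;       // two independent accumulation chains
  int k = 0;
  for (; k + 1 < n; k += 2) {
    c += a[k] * b[k];           // even k accumulates into c
    d += a[k + 1] * b[k + 1];   // odd k accumulates into d
  }
  for (; k < n; ++k) c += a[k] * b[k];
  return c + d;                 // merge, like C0 = padd(C0, D0) above
}
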
+
+template<int nr, Index LhsProgress, Index RhsProgress, typename LhsScalar, typename RhsScalar, typename ResScalar, typename AccPacket, typename LhsPacket, typename RhsPacket, typename ResPacket, typename GEBPTraits, typename LinearMapper, typename DataMapper>
+struct lhs_process_fraction_of_packet : lhs_process_one_packet<nr, LhsProgress, RhsProgress, LhsScalar, RhsScalar, ResScalar, AccPacket, LhsPacket, RhsPacket, ResPacket, GEBPTraits, LinearMapper, DataMapper>
+{
+  EIGEN_STRONG_INLINE void peeled_kc_onestep(Index K, const LhsScalar* blA, const RhsScalar* blB, GEBPTraits traits, LhsPacket *A0, RhsPacket *B_0, RhsPacket *B1, RhsPacket *B2, RhsPacket *B3, AccPacket *C0, AccPacket *C1, AccPacket *C2, AccPacket *C3)
+  {
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1X4");
+ EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!");
+ traits.loadLhsUnaligned(&blA[(0+1*K)*(LhsProgress)], *A0);
+ traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], *B_0, *B1, *B2, *B3);
+ traits.madd(*A0, *B_0, *C0, *B_0);
+ traits.madd(*A0, *B1, *C1, *B1);
+ traits.madd(*A0, *B2, *C2, *B2);
+ traits.madd(*A0, *B3, *C3, *B3);
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 1X4");
+ }
+};
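
// A scalar model of the broadcastRhs load used in the fractional step
// above: four consecutive rhs scalars are splatted into four packets,
// one value per packet, instead of loading one packed RhsPacketx4
// panel. Assumes 4-lane packets; broadcast4 is a hypothetical helper
// mirroring pset1.
inline void broadcast4(const float* b, float B[4][4]) {
  for (int j = 0; j < 4; ++j)
    for (int i = 0; i < 4; ++i)
      B[j][i] = b[j];  // every lane of packet j holds b[j]
}
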
+
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
@@ -903,10 +1415,12 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
- const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
+ const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? peeled_mc2+((rows-peeled_mc2)/(1*LhsProgress))*(1*LhsProgress) : 0;
+ const Index peeled_mc_half = mr>=LhsProgressHalf ? peeled_mc1+((rows-peeled_mc1)/(LhsProgressHalf))*(LhsProgressHalf) : 0;
+ const Index peeled_mc_quarter = mr>=LhsProgressQuarter ? peeled_mc_half+((rows-peeled_mc_half)/(LhsProgressQuarter))*(LhsProgressQuarter) : 0;
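
// A sketch of how the peeling boundaries above partition [0, rows):
// each stage consumes the largest multiple of its progress that fits
// in what the previous stage left over. peel is a hypothetical helper.
inline long peel(long start, long rows, long progress) {
  return start + ((rows - start) / progress) * progress;
}
// e.g. rows == 57 with PacketSize 16 (half 8, quarter 4):
//   peeled_mc3        = peel(0, 57, 48)  == 48
//   peeled_mc1        = peel(48, 57, 16) == 48
//   peeled_mc_half    = peel(48, 57, 8)  == 56
//   peeled_mc_quarter = peel(56, 57, 4)  == 56; row 56 goes to the scalar tail.
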
enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)
const Index peeled_kc = depth & ~(pk-1);
- const Index prefetch_res_offset = 32/sizeof(ResScalar);
+ const int prefetch_res_offset = 32/sizeof(ResScalar);
// const Index depth2 = depth & ~1;
//---------- Process 3 * LhsProgress rows at once ----------
@@ -964,36 +1478,48 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
for(Index k=0; k<peeled_kc; k+=pk)
{
EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
- RhsPacket B_0, T0;
+ // 15 registers are taken (12 for acc, 2 for lhs).
+ RhsPanel15 rhs_panel;
+ RhsPacket T0;
LhsPacket A2;
-
-#define EIGEN_GEBP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_ARCH_ARM64 && defined(EIGEN_VECTORIZE_NEON) && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // see http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1633
+ // without this workaround A0, A1, and A2 are loaded in the same register,
+ // which is not good for pipelining
+ #define EIGEN_GEBP_3PX4_REGISTER_ALLOC_WORKAROUND __asm__ ("" : "+w,m" (A0), "+w,m" (A1), "+w,m" (A2));
+ #else
+ #define EIGEN_GEBP_3PX4_REGISTER_ALLOC_WORKAROUND
+ #endif
+#define EIGEN_GEBP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- internal::prefetch(blA+(3*K+16)*LhsProgress); \
- if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \
- traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
- traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
- traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
- traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C0, T0); \
- traits.madd(A1, B_0, C4, T0); \
- traits.madd(A2, B_0, C8, B_0); \
- traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C1, T0); \
- traits.madd(A1, B_0, C5, T0); \
- traits.madd(A2, B_0, C9, B_0); \
- traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C2, T0); \
- traits.madd(A1, B_0, C6, T0); \
- traits.madd(A2, B_0, C10, B_0); \
- traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \
- traits.madd(A0, B_0, C3 , T0); \
- traits.madd(A1, B_0, C7, T0); \
- traits.madd(A2, B_0, C11, B_0); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
- } while(false)
+ internal::prefetch(blA + (3 * K + 16) * LhsProgress); \
+ if (EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) { \
+ internal::prefetch(blB + (4 * K + 16) * RhsProgress); \
+ } /* Bug 953 */ \
+ traits.loadLhs(&blA[(0 + 3 * K) * LhsProgress], A0); \
+ traits.loadLhs(&blA[(1 + 3 * K) * LhsProgress], A1); \
+ traits.loadLhs(&blA[(2 + 3 * K) * LhsProgress], A2); \
+ EIGEN_GEBP_3PX4_REGISTER_ALLOC_WORKAROUND \
+ traits.loadRhs(blB + (0+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C0, T0, fix<0>); \
+ traits.madd(A1, rhs_panel, C4, T0, fix<0>); \
+ traits.madd(A2, rhs_panel, C8, T0, fix<0>); \
+ traits.updateRhs(blB + (1+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C1, T0, fix<1>); \
+ traits.madd(A1, rhs_panel, C5, T0, fix<1>); \
+ traits.madd(A2, rhs_panel, C9, T0, fix<1>); \
+ traits.updateRhs(blB + (2+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C2, T0, fix<2>); \
+ traits.madd(A1, rhs_panel, C6, T0, fix<2>); \
+ traits.madd(A2, rhs_panel, C10, T0, fix<2>); \
+ traits.updateRhs(blB + (3+4*K) * Traits::RhsProgress, rhs_panel); \
+ traits.madd(A0, rhs_panel, C3, T0, fix<3>); \
+ traits.madd(A1, rhs_panel, C7, T0, fix<3>); \
+ traits.madd(A2, rhs_panel, C11, T0, fix<3>); \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
+ } while (false)
internal::prefetch(blB);
EIGEN_GEBP_ONESTEP(0);
@@ -1013,7 +1539,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)
{
- RhsPacket B_0, T0;
+ RhsPanel15 rhs_panel;
+ RhsPacket T0;
LhsPacket A2;
EIGEN_GEBP_ONESTEP(0);
blB += 4*RhsProgress;
@@ -1025,9 +1552,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1, R2;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
- R2 = r0.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r0.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
traits.acc(C8, alphav, R2);
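
// Why the rewritten loads spell r0.template loadPacket<ResPacket>(...):
// loadPacket now takes the packet type as a template argument, and
// since the mapper's type depends on a template parameter, C++ needs
// the template keyword to parse the '<'. A minimal illustration
// (Mapper and load_first are hypothetical):
struct Mapper {
  template <class P> P loadPacket(int /*offset*/) const { return P(); }
};
template <class P, class M>
P load_first(const M& m) {
  return m.template loadPacket<P>(0);  // 'template' required: M is dependent
}
// Usage: Mapper m; float f = load_first<float>(m);
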
@@ -1035,9 +1562,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r0.storePacket(1 * Traits::ResPacketSize, R1);
r0.storePacket(2 * Traits::ResPacketSize, R2);
- R0 = r1.loadPacket(0 * Traits::ResPacketSize);
- R1 = r1.loadPacket(1 * Traits::ResPacketSize);
- R2 = r1.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r1.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r1.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r1.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C1, alphav, R0);
traits.acc(C5, alphav, R1);
traits.acc(C9, alphav, R2);
@@ -1045,9 +1572,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r1.storePacket(1 * Traits::ResPacketSize, R1);
r1.storePacket(2 * Traits::ResPacketSize, R2);
- R0 = r2.loadPacket(0 * Traits::ResPacketSize);
- R1 = r2.loadPacket(1 * Traits::ResPacketSize);
- R2 = r2.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r2.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r2.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r2.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C2, alphav, R0);
traits.acc(C6, alphav, R1);
traits.acc(C10, alphav, R2);
@@ -1055,9 +1582,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r2.storePacket(1 * Traits::ResPacketSize, R1);
r2.storePacket(2 * Traits::ResPacketSize, R2);
- R0 = r3.loadPacket(0 * Traits::ResPacketSize);
- R1 = r3.loadPacket(1 * Traits::ResPacketSize);
- R2 = r3.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r3.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r3.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r3.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C3, alphav, R0);
traits.acc(C7, alphav, R1);
traits.acc(C11, alphav, R2);
@@ -1093,20 +1620,20 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
{
EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
RhsPacket B_0;
-#define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
+#define EIGEN_GEBGP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
- traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
- traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
- traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
- traits.madd(A0, B_0, C0, B_0); \
- traits.madd(A1, B_0, C4, B_0); \
- traits.madd(A2, B_0, C8, B_0); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
- } while(false)
-
+ traits.loadLhs(&blA[(0 + 3 * K) * LhsProgress], A0); \
+ traits.loadLhs(&blA[(1 + 3 * K) * LhsProgress], A1); \
+ traits.loadLhs(&blA[(2 + 3 * K) * LhsProgress], A2); \
+ traits.loadRhs(&blB[(0 + K) * RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0, fix<0>); \
+ traits.madd(A1, B_0, C4, B_0, fix<0>); \
+ traits.madd(A2, B_0, C8, B_0, fix<0>); \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
+ } while (false)
+
EIGEN_GEBGP_ONESTEP(0);
EIGEN_GEBGP_ONESTEP(1);
EIGEN_GEBGP_ONESTEP(2);
@@ -1116,8 +1643,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
EIGEN_GEBGP_ONESTEP(6);
EIGEN_GEBGP_ONESTEP(7);
- blB += pk*RhsProgress;
- blA += pk*3*Traits::LhsProgress;
+ blB += int(pk) * int(RhsProgress);
+ blA += int(pk) * 3 * int(Traits::LhsProgress);
EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
}
@@ -1134,9 +1661,9 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1, R2;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
- R2 = r0.loadPacket(2 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r0.template loadPacket<ResPacket>(2 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
traits.acc(C8, alphav, R2);
@@ -1195,26 +1722,34 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
for(Index k=0; k<peeled_kc; k+=pk)
{
EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
- RhsPacket B_0, B1, B2, B3, T0;
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
+
+ // NOTE: the begin/end asm comments below work around bug 935!
+ // but they are not enough for gcc>=6 without FMA (bug 1637)
+ #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
+ #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__ ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1));
+ #else
+ #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND
+ #endif
+#define EIGEN_GEBGP_ONESTEP(K) \
+ do { \
+ EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
+ traits.loadLhs(&blA[(0 + 2 * K) * LhsProgress], A0); \
+ traits.loadLhs(&blA[(1 + 2 * K) * LhsProgress], A1); \
+ traits.loadRhs(&blB[(0 + 4 * K) * RhsProgress], rhs_panel); \
+ traits.madd(A0, rhs_panel, C0, T0, fix<0>); \
+ traits.madd(A1, rhs_panel, C4, T0, fix<0>); \
+ traits.madd(A0, rhs_panel, C1, T0, fix<1>); \
+ traits.madd(A1, rhs_panel, C5, T0, fix<1>); \
+ traits.madd(A0, rhs_panel, C2, T0, fix<2>); \
+ traits.madd(A1, rhs_panel, C6, T0, fix<2>); \
+ traits.madd(A0, rhs_panel, C3, T0, fix<3>); \
+ traits.madd(A1, rhs_panel, C7, T0, fix<3>); \
+ EIGEN_GEBP_2PX4_SPILLING_WORKAROUND \
+ EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
+ } while (false)
- #define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
- EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
- traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
- traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(A0, B_0, C0, T0); \
- traits.madd(A1, B_0, C4, B_0); \
- traits.madd(A0, B1, C1, T0); \
- traits.madd(A1, B1, C5, B1); \
- traits.madd(A0, B2, C2, T0); \
- traits.madd(A1, B2, C6, B2); \
- traits.madd(A0, B3, C3, T0); \
- traits.madd(A1, B3, C7, B3); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
- } while(false)
-
internal::prefetch(blB+(48+0));
EIGEN_GEBGP_ONESTEP(0);
EIGEN_GEBGP_ONESTEP(1);
@@ -1234,7 +1769,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)
{
- RhsPacket B_0, B1, B2, B3, T0;
+ RhsPacketx4 rhs_panel;
+ RhsPacket T0;
EIGEN_GEBGP_ONESTEP(0);
blB += 4*RhsProgress;
blA += 2*Traits::LhsProgress;
@@ -1244,10 +1780,10 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1, R2, R3;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
- R2 = r1.loadPacket(0 * Traits::ResPacketSize);
- R3 = r1.loadPacket(1 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r1.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R3 = r1.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
traits.acc(C1, alphav, R2);
@@ -1257,10 +1793,10 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
r1.storePacket(0 * Traits::ResPacketSize, R2);
r1.storePacket(1 * Traits::ResPacketSize, R3);
- R0 = r2.loadPacket(0 * Traits::ResPacketSize);
- R1 = r2.loadPacket(1 * Traits::ResPacketSize);
- R2 = r3.loadPacket(0 * Traits::ResPacketSize);
- R3 = r3.loadPacket(1 * Traits::ResPacketSize);
+ R0 = r2.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r2.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
+ R2 = r3.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R3 = r3.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
traits.acc(C2, alphav, R0);
traits.acc(C6, alphav, R1);
traits.acc(C3, alphav, R2);
@@ -1305,8 +1841,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
- traits.madd(A0, B_0, C0, B1); \
- traits.madd(A1, B_0, C4, B_0); \
+ traits.madd(A0, B_0, C0, B1, fix<0>); \
+ traits.madd(A1, B_0, C4, B_0, fix<0>); \
EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1"); \
} while(false)
@@ -1319,8 +1855,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
EIGEN_GEBGP_ONESTEP(6);
EIGEN_GEBGP_ONESTEP(7);
- blB += pk*RhsProgress;
- blA += pk*2*Traits::LhsProgress;
+ blB += int(pk) * int(RhsProgress);
+ blA += int(pk) * 2 * int(Traits::LhsProgress);
EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
}
@@ -1337,8 +1873,8 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
ResPacket R0, R1;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r0.loadPacket(1 * Traits::ResPacketSize);
+ R0 = r0.template loadPacket<ResPacket>(0 * Traits::ResPacketSize);
+ R1 = r0.template loadPacket<ResPacket>(1 * Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
traits.acc(C4, alphav, R1);
r0.storePacket(0 * Traits::ResPacketSize, R0);
@@ -1350,186 +1886,43 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
//---------- Process 1 * LhsProgress rows at once ----------
if(mr>=1*Traits::LhsProgress)
{
- // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)
- for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
- {
- // loops on each largest micro vertical panel of rhs (depth * nr)
- for(Index j2=0; j2<packet_cols4; j2+=nr)
- {
- // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
- // stored into 1 x nr registers.
-
- const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
- prefetch(&blA[0]);
-
- // gets res block as register
- AccPacket C0, C1, C2, C3;
- traits.initAcc(C0);
- traits.initAcc(C1);
- traits.initAcc(C2);
- traits.initAcc(C3);
-
- LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
- LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
- LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
- LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
-
- r0.prefetch(prefetch_res_offset);
- r1.prefetch(prefetch_res_offset);
- r2.prefetch(prefetch_res_offset);
- r3.prefetch(prefetch_res_offset);
-
- // performs "inner" products
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- prefetch(&blB[0]);
- LhsPacket A0;
-
- for(Index k=0; k<peeled_kc; k+=pk)
- {
- EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
- RhsPacket B_0, B1, B2, B3;
-
-#define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4"); \
- EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
- traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(A0, B_0, C0, B_0); \
- traits.madd(A0, B1, C1, B1); \
- traits.madd(A0, B2, C2, B2); \
- traits.madd(A0, B3, C3, B3); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4"); \
- } while(false)
-
- internal::prefetch(blB+(48+0));
- EIGEN_GEBGP_ONESTEP(0);
- EIGEN_GEBGP_ONESTEP(1);
- EIGEN_GEBGP_ONESTEP(2);
- EIGEN_GEBGP_ONESTEP(3);
- internal::prefetch(blB+(48+16));
- EIGEN_GEBGP_ONESTEP(4);
- EIGEN_GEBGP_ONESTEP(5);
- EIGEN_GEBGP_ONESTEP(6);
- EIGEN_GEBGP_ONESTEP(7);
-
- blB += pk*4*RhsProgress;
- blA += pk*1*LhsProgress;
-
- EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
- }
- // process remaining peeled loop
- for(Index k=peeled_kc; k<depth; k++)
- {
- RhsPacket B_0, B1, B2, B3;
- EIGEN_GEBGP_ONESTEP(0);
- blB += 4*RhsProgress;
- blA += 1*LhsProgress;
- }
-#undef EIGEN_GEBGP_ONESTEP
-
- ResPacket R0, R1;
- ResPacket alphav = pset1<ResPacket>(alpha);
-
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- R1 = r1.loadPacket(0 * Traits::ResPacketSize);
- traits.acc(C0, alphav, R0);
- traits.acc(C1, alphav, R1);
- r0.storePacket(0 * Traits::ResPacketSize, R0);
- r1.storePacket(0 * Traits::ResPacketSize, R1);
-
- R0 = r2.loadPacket(0 * Traits::ResPacketSize);
- R1 = r3.loadPacket(0 * Traits::ResPacketSize);
- traits.acc(C2, alphav, R0);
- traits.acc(C3, alphav, R1);
- r2.storePacket(0 * Traits::ResPacketSize, R0);
- r3.storePacket(0 * Traits::ResPacketSize, R1);
- }
-
- // Deal with remaining columns of the rhs
- for(Index j2=packet_cols4; j2<cols; j2++)
- {
- // One column at a time
- const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
- prefetch(&blA[0]);
-
- // gets res block as register
- AccPacket C0;
- traits.initAcc(C0);
-
- LinearMapper r0 = res.getLinearMapper(i, j2);
-
- // performs "inner" products
- const RhsScalar* blB = &blockB[j2*strideB+offsetB];
- LhsPacket A0;
-
- for(Index k=0; k<peeled_kc; k+=pk)
- {
- EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
- RhsPacket B_0;
-
-#define EIGEN_GEBGP_ONESTEP(K) \
- do { \
- EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1"); \
- EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
- traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
- traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
- traits.madd(A0, B_0, C0, B_0); \
- EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1"); \
- } while(false);
-
- EIGEN_GEBGP_ONESTEP(0);
- EIGEN_GEBGP_ONESTEP(1);
- EIGEN_GEBGP_ONESTEP(2);
- EIGEN_GEBGP_ONESTEP(3);
- EIGEN_GEBGP_ONESTEP(4);
- EIGEN_GEBGP_ONESTEP(5);
- EIGEN_GEBGP_ONESTEP(6);
- EIGEN_GEBGP_ONESTEP(7);
-
- blB += pk*RhsProgress;
- blA += pk*1*Traits::LhsProgress;
-
- EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
- }
-
- // process remaining peeled loop
- for(Index k=peeled_kc; k<depth; k++)
- {
- RhsPacket B_0;
- EIGEN_GEBGP_ONESTEP(0);
- blB += RhsProgress;
- blA += 1*Traits::LhsProgress;
- }
-#undef EIGEN_GEBGP_ONESTEP
- ResPacket R0;
- ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = r0.loadPacket(0 * Traits::ResPacketSize);
- traits.acc(C0, alphav, R0);
- r0.storePacket(0 * Traits::ResPacketSize, R0);
- }
- }
+ lhs_process_one_packet<nr, LhsProgress, RhsProgress, LhsScalar, RhsScalar, ResScalar, AccPacket, LhsPacket, RhsPacket, ResPacket, Traits, LinearMapper, DataMapper> p;
+ p(res, blockA, blockB, alpha, peeled_mc2, peeled_mc1, strideA, strideB, offsetA, offsetB, prefetch_res_offset, peeled_kc, pk, cols, depth, packet_cols4);
+ }
+ //---------- Process LhsProgressHalf rows at once ----------
+ if((LhsProgressHalf < LhsProgress) && mr>=LhsProgressHalf)
+ {
+ lhs_process_fraction_of_packet<nr, LhsProgressHalf, RhsProgressHalf, LhsScalar, RhsScalar, ResScalar, AccPacketHalf, LhsPacketHalf, RhsPacketHalf, ResPacketHalf, HalfTraits, LinearMapper, DataMapper> p;
+ p(res, blockA, blockB, alpha, peeled_mc1, peeled_mc_half, strideA, strideB, offsetA, offsetB, prefetch_res_offset, peeled_kc, pk, cols, depth, packet_cols4);
+ }
+ //---------- Process LhsProgressQuarter rows at once ----------
+ if((LhsProgressQuarter < LhsProgressHalf) && mr>=LhsProgressQuarter)
+ {
+ lhs_process_fraction_of_packet<nr, LhsProgressQuarter, RhsProgressQuarter, LhsScalar, RhsScalar, ResScalar, AccPacketQuarter, LhsPacketQuarter, RhsPacketQuarter, ResPacketQuarter, QuarterTraits, LinearMapper, DataMapper> p;
+ p(res, blockA, blockB, alpha, peeled_mc_half, peeled_mc_quarter, strideA, strideB, offsetA, offsetB, prefetch_res_offset, peeled_kc, pk, cols, depth, packet_cols4);
}
//---------- Process remaining rows, 1 at once ----------
- if(peeled_mc1<rows)
+ if(peeled_mc_quarter<rows)
{
// loop on each panel of the rhs
for(Index j2=0; j2<packet_cols4; j2+=nr)
{
// loop on each row of the lhs (1*LhsProgress x depth)
- for(Index i=peeled_mc1; i<rows; i+=1)
+ for(Index i=peeled_mc_quarter; i<rows; i+=1)
{
const LhsScalar* blA = &blockA[i*strideA+offsetA];
prefetch(&blA[0]);
const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- // The following piece of code wont work for 512 bit registers
- // Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size
- // as nr (which is currently 4) for the return type.
- typedef typename unpacket_traits<SResPacket>::half SResPacketHalf;
+ // If LhsProgress is 8 or 16, it assumes that there is a
+ // half or quarter packet, respectively, of the same size as
+ // nr (which is currently 4) for the return type.
+ const int SResPacketHalfSize = unpacket_traits<typename unpacket_traits<SResPacket>::half>::size;
+ const int SResPacketQuarterSize = unpacket_traits<typename unpacket_traits<typename unpacket_traits<SResPacket>::half>::half>::size;
if ((SwappedTraits::LhsProgress % 4) == 0 &&
- (SwappedTraits::LhsProgress <= 8) &&
- (SwappedTraits::LhsProgress!=8 || unpacket_traits<SResPacketHalf>::size==nr))
+ (SwappedTraits::LhsProgress<=16) &&
+ (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr) &&
+ (SwappedTraits::LhsProgress!=16 || SResPacketQuarterSize==nr))
{
SAccPacket C0, C1, C2, C3;
straits.initAcc(C0);
@@ -1552,15 +1945,15 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
straits.loadRhsQuad(blA+0*spk, B_0);
straits.loadRhsQuad(blA+1*spk, B_1);
- straits.madd(A0,B_0,C0,B_0);
- straits.madd(A1,B_1,C1,B_1);
+ straits.madd(A0,B_0,C0,B_0, fix<0>);
+ straits.madd(A1,B_1,C1,B_1, fix<0>);
straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
straits.loadRhsQuad(blA+2*spk, B_0);
straits.loadRhsQuad(blA+3*spk, B_1);
- straits.madd(A0,B_0,C2,B_0);
- straits.madd(A1,B_1,C3,B_1);
+ straits.madd(A0,B_0,C2,B_0, fix<0>);
+ straits.madd(A1,B_1,C3,B_1, fix<0>);
blB += 4*SwappedTraits::LhsProgress;
blA += 4*spk;
@@ -1573,7 +1966,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
straits.loadLhsUnaligned(blB, A0);
straits.loadRhsQuad(blA, B_0);
- straits.madd(A0,B_0,C0,B_0);
+ straits.madd(A0,B_0,C0,B_0, fix<0>);
blB += SwappedTraits::LhsProgress;
blA += spk;
@@ -1583,7 +1976,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
// Special case where we have to first reduce the accumulation register C0
typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
- typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
+ typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SRhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;
SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
@@ -1596,16 +1989,25 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
SRhsPacketHalf b0;
straits.loadLhsUnaligned(blB, a0);
straits.loadRhs(blA, b0);
- SAccPacketHalf c0 = predux_downto4(C0);
- straits.madd(a0,b0,c0,b0);
+ SAccPacketHalf c0 = predux_half_dowto4(C0);
+ straits.madd(a0,b0,c0,b0, fix<0>);
straits.acc(c0, alphav, R);
}
else
{
- straits.acc(predux_downto4(C0), alphav, R);
+ straits.acc(predux_half_dowto4(C0), alphav, R);
}
res.scatterPacket(i, j2, R);
}
+ else if (SwappedTraits::LhsProgress==16)
+ {
+ // Special case where we have to first reduce the
+ // accumulation register C0. We specialize the block in
+ // template form, so that LhsProgress < 16 paths don't
+ // fail to compile
+ last_row_process_16_packets<LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> p;
+ p(res, straits, blA, blB, depth, endk, i, j2,alpha, C0);
+ }
else
{
SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
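
// The "specialize the block in template form" remark above, in
// miniature: placing the 16-lane code in its own specialization means
// it is only instantiated when LhsProgress really is 16, so builds
// with smaller packets never compile that path. last_rows is a
// hypothetical name.
template <int Progress> struct last_rows {
  void operator()() { /* generic last-row path */ }
};
template <> struct last_rows<16> {
  void operator()() { /* 16-lane double-reduction path */ }
};
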
@@ -1628,14 +2030,14 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
B_0 = blB[0];
B_1 = blB[1];
- CJMADD(cj,A0,B_0,C0, B_0);
- CJMADD(cj,A0,B_1,C1, B_1);
-
+ C0 = cj.pmadd(A0,B_0,C0);
+ C1 = cj.pmadd(A0,B_1,C1);
+
B_0 = blB[2];
B_1 = blB[3];
- CJMADD(cj,A0,B_0,C2, B_0);
- CJMADD(cj,A0,B_1,C3, B_1);
-
+ C2 = cj.pmadd(A0,B_0,C2);
+ C3 = cj.pmadd(A0,B_1,C3);
+
blB += 4;
}
res(i, j2 + 0) += alpha * C0;
@@ -1649,7 +2051,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
for(Index j2=packet_cols4; j2<cols; j2++)
{
// loop on each row of the lhs (1*LhsProgress x depth)
- for(Index i=peeled_mc1; i<rows; i+=1)
+ for(Index i=peeled_mc_quarter; i<rows; i+=1)
{
const LhsScalar* blA = &blockA[i*strideA+offsetA];
prefetch(&blA[0]);
@@ -1660,7 +2062,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
{
LhsScalar A0 = blA[k];
RhsScalar B_0 = blB[k];
- CJMADD(cj, A0, B_0, C0, B_0);
+ C0 = cj.pmadd(A0, B_0, C0);
}
res(i, j2) += alpha * C0;
}
@@ -1669,8 +2071,6 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
}
-#undef CJMADD
-
// pack a block of the lhs
// The traversal is as follow (mr==4):
// 0 4 8 12 ...
@@ -1685,19 +2085,24 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
//
// 32 33 34 35 ...
// 36 37 38 39 ...
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
{
typedef typename DataMapper::LinearMapper LinearMapper;
EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
- typedef typename packet_traits<Scalar>::type Packet;
- enum { PacketSize = packet_traits<Scalar>::size };
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<Packet>::half>::half QuarterPacket;
+ enum { PacketSize = unpacket_traits<Packet>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size,
+ HasHalf = (int)HalfPacketSize < (int)PacketSize,
+ HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize};
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride);
@@ -1709,9 +2114,12 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
- const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
- const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
- : Pack2>1 ? (rows/Pack2)*Pack2 : 0;
+ const Index peeled_mc1 = Pack1>=1*PacketSize ? peeled_mc2+((rows-peeled_mc2)/(1*PacketSize))*(1*PacketSize) : 0;
+ const Index peeled_mc_half = Pack1>=HalfPacketSize ? peeled_mc1+((rows-peeled_mc1)/(HalfPacketSize))*(HalfPacketSize) : 0;
+ const Index peeled_mc_quarter = Pack1>=QuarterPacketSize ? (rows/(QuarterPacketSize))*(QuarterPacketSize) : 0;
+ const Index last_lhs_progress = rows > peeled_mc_quarter ? (rows - peeled_mc_quarter) & ~1 : 0;
+ const Index peeled_mc0 = Pack2>=PacketSize ? peeled_mc_quarter
+ : Pack2>1 && last_lhs_progress ? (rows/last_lhs_progress)*last_lhs_progress : 0;
Index i=0;
@@ -1725,9 +2133,9 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
for(Index k=0; k<depth; k++)
{
Packet A, B, C;
- A = lhs.loadPacket(i+0*PacketSize, k);
- B = lhs.loadPacket(i+1*PacketSize, k);
- C = lhs.loadPacket(i+2*PacketSize, k);
+ A = lhs.template loadPacket<Packet>(i+0*PacketSize, k);
+ B = lhs.template loadPacket<Packet>(i+1*PacketSize, k);
+ C = lhs.template loadPacket<Packet>(i+2*PacketSize, k);
pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
@@ -1745,8 +2153,8 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
for(Index k=0; k<depth; k++)
{
Packet A, B;
- A = lhs.loadPacket(i+0*PacketSize, k);
- B = lhs.loadPacket(i+1*PacketSize, k);
+ A = lhs.template loadPacket<Packet>(i+0*PacketSize, k);
+ B = lhs.template loadPacket<Packet>(i+1*PacketSize, k);
pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
}
@@ -1763,27 +2171,67 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
for(Index k=0; k<depth; k++)
{
Packet A;
- A = lhs.loadPacket(i+0*PacketSize, k);
+ A = lhs.template loadPacket<Packet>(i+0*PacketSize, k);
pstore(blockA+count, cj.pconj(A));
count+=PacketSize;
}
if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
}
}
- // Pack scalars
+ // Pack half packets
+ if(HasHalf && Pack1>=HalfPacketSize)
+ {
+ for(; i<peeled_mc_half; i+=HalfPacketSize)
+ {
+ if(PanelMode) count += (HalfPacketSize) * offset;
+
+ for(Index k=0; k<depth; k++)
+ {
+ HalfPacket A;
+ A = lhs.template loadPacket<HalfPacket>(i+0*(HalfPacketSize), k);
+ pstoreu(blockA+count, cj.pconj(A));
+ count+=HalfPacketSize;
+ }
+ if(PanelMode) count += (HalfPacketSize) * (stride-offset-depth);
+ }
+ }
+ // Pack quarter packets
+ if(HasQuarter && Pack1>=QuarterPacketSize)
+ {
+ for(; i<peeled_mc_quarter; i+=QuarterPacketSize)
+ {
+ if(PanelMode) count += (QuarterPacketSize) * offset;
+
+ for(Index k=0; k<depth; k++)
+ {
+ QuarterPacket A;
+ A = lhs.template loadPacket<QuarterPacket>(i+0*(QuarterPacketSize), k);
+ pstoreu(blockA+count, cj.pconj(A));
+ count+=QuarterPacketSize;
+ }
+ if(PanelMode) count += (QuarterPacketSize) * (stride-offset-depth);
+ }
+ }
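
// A scalar model of the half/quarter packing loops above: each pass
// copies a strip of 'strip' consecutive lhs rows, column by column,
// contiguously into blockA. pack_strip and its parameters are
// hypothetical; the real code uses HalfPacket/QuarterPacket loads.
inline void pack_strip(float* blockA, const float* lhs, long lhsStride,
                       long i, long depth, int strip) {  // strip: 8 or 4 rows
  long count = 0;
  for (long k = 0; k < depth; ++k)
    for (int r = 0; r < strip; ++r)
      blockA[count++] = lhs[(i + r) + k * lhsStride];  // column-major read
}
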
+  // Pack2 may be *smaller* than PacketSize: that happens for
+  // products like real * complex, where the lhs has to advance at half
+  // progress so its operands can be duplicated to address both the
+  // real and imaginary parts on the rhs. This portion packs those
+  // half-progress strips until they match the count expected by the
+  // last peeling loop at this point (for the rhs).
if(Pack2<PacketSize && Pack2>1)
{
- for(; i<peeled_mc0; i+=Pack2)
+ for(; i<peeled_mc0; i+=last_lhs_progress)
{
- if(PanelMode) count += Pack2 * offset;
+ if(PanelMode) count += last_lhs_progress * offset;
for(Index k=0; k<depth; k++)
- for(Index w=0; w<Pack2; w++)
+ for(Index w=0; w<last_lhs_progress; w++)
blockA[count++] = cj(lhs(i+w, k));
- if(PanelMode) count += Pack2 * (stride-offset-depth);
+ if(PanelMode) count += last_lhs_progress * (stride-offset-depth);
}
}
+ // Pack scalars
for(; i<rows; i++)
{
if(PanelMode) count += offset;
@@ -1793,19 +2241,24 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Co
}
}
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
{
typedef typename DataMapper::LinearMapper LinearMapper;
EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
-EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
- typedef typename packet_traits<Scalar>::type Packet;
- enum { PacketSize = packet_traits<Scalar>::size };
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<Packet>::half>::half QuarterPacket;
+ enum { PacketSize = unpacket_traits<Packet>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size,
+ HasHalf = (int)HalfPacketSize < (int)PacketSize,
+ HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize};
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride);
@@ -1813,37 +2266,51 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Ro
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
Index count = 0;
+ bool gone_half = false, gone_quarter = false, gone_last = false;
-// const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
-// const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
-// const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
-
- int pack = Pack1;
Index i = 0;
+ int pack = Pack1;
+ int psize = PacketSize;
while(pack>0)
{
Index remaining_rows = rows-i;
- Index peeled_mc = i+(remaining_rows/pack)*pack;
+ Index peeled_mc = gone_last ? Pack2>1 ? (rows/pack)*pack : 0 : i+(remaining_rows/pack)*pack;
+ Index starting_pos = i;
for(; i<peeled_mc; i+=pack)
{
if(PanelMode) count += pack * offset;
- const Index peeled_k = (depth/PacketSize)*PacketSize;
Index k=0;
- if(pack>=PacketSize)
+ if(pack>=psize && psize >= QuarterPacketSize)
{
- for(; k<peeled_k; k+=PacketSize)
+ const Index peeled_k = (depth/psize)*psize;
+ for(; k<peeled_k; k+=psize)
{
- for (Index m = 0; m < pack; m += PacketSize)
+ for (Index m = 0; m < pack; m += psize)
{
- PacketBlock<Packet> kernel;
- for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
- ptranspose(kernel);
- for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
+ if (psize == PacketSize) {
+ PacketBlock<Packet> kernel;
+ for (int p = 0; p < psize; ++p) kernel.packet[p] = lhs.template loadPacket<Packet>(i+p+m, k);
+ ptranspose(kernel);
+ for (int p = 0; p < psize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
+ } else if (HasHalf && psize == HalfPacketSize) {
+ gone_half = true;
+ PacketBlock<HalfPacket> kernel_half;
+ for (int p = 0; p < psize; ++p) kernel_half.packet[p] = lhs.template loadPacket<HalfPacket>(i+p+m, k);
+ ptranspose(kernel_half);
+ for (int p = 0; p < psize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel_half.packet[p]));
+ } else if (HasQuarter && psize == QuarterPacketSize) {
+ gone_quarter = true;
+ PacketBlock<QuarterPacket> kernel_quarter;
+ for (int p = 0; p < psize; ++p) kernel_quarter.packet[p] = lhs.template loadPacket<QuarterPacket>(i+p+m, k);
+ ptranspose(kernel_quarter);
+ for (int p = 0; p < psize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel_quarter.packet[p]));
+ }
}
- count += PacketSize*pack;
+ count += psize*pack;
}
}
+
for(; k<depth; k++)
{
Index w=0;
@@ -1866,9 +2333,28 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, Ro
if(PanelMode) count += pack * (stride-offset-depth);
}
- pack -= PacketSize;
- if(pack<Pack2 && (pack+PacketSize)!=Pack2)
- pack = Pack2;
+ pack -= psize;
+ Index left = rows - i;
+ if (pack <= 0) {
+ if (!gone_last &&
+ (starting_pos == i || left >= psize/2 || left >= psize/4) &&
+ ((psize/2 == HalfPacketSize && HasHalf && !gone_half) ||
+ (psize/2 == QuarterPacketSize && HasQuarter && !gone_quarter))) {
+ psize /= 2;
+ pack = psize;
+ continue;
+ }
+      // Pack2 may be *smaller* than PacketSize: that happens for
+      // products like real * complex, where the lhs has to advance at half
+      // progress so its operands can be duplicated to address both the
+      // real and imaginary parts on the rhs. This portion packs those
+      // half-progress strips until they match the count expected by the
+      // last peeling loop at this point (for the rhs).
+ if (Pack2 < PacketSize && !gone_last) {
+ gone_last = true;
+ psize = pack = left & ~1;
+ }
+ }
}
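
// A compact model of the pack/psize loop above: pack starts at Pack1
// and shrinks by the current packet size; once exhausted, psize is
// halved to try the half and then the quarter packet before the
// scalar tail takes over. Sizes below are illustrative only.
#include <cstdio>
int main() {
  int psize = 16, pack = 16;                      // Pack1 == PacketSize == 16
  while (psize >= 4) {                            // 4 == QuarterPacketSize
    std::printf("packing %d rows per pass\n", pack);
    pack -= psize;
    if (pack <= 0) { psize /= 2; pack = psize; }  // the gone_half/gone_quarter steps
  }
  return 0;
}
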
for(; i<rows; i++)
@@ -1924,7 +2410,7 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Co
// const Scalar* b6 = &rhs[(j2+6)*rhsStride];
// const Scalar* b7 = &rhs[(j2+7)*rhsStride];
// Index k=0;
-// if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4
+// if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
// {
// for(; k<peeled_k; k+=PacketSize) {
// PacketBlock<Packet> kernel;
@@ -1971,10 +2457,10 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Co
{
for(; k<peeled_k; k+=PacketSize) {
PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
- kernel.packet[0] = dm0.loadPacket(k);
- kernel.packet[1%PacketSize] = dm1.loadPacket(k);
- kernel.packet[2%PacketSize] = dm2.loadPacket(k);
- kernel.packet[3%PacketSize] = dm3.loadPacket(k);
+ kernel.packet[0 ] = dm0.template loadPacket<Packet>(k);
+ kernel.packet[1%PacketSize] = dm1.template loadPacket<Packet>(k);
+ kernel.packet[2%PacketSize] = dm2.template loadPacket<Packet>(k);
+ kernel.packet[3%PacketSize] = dm3.template loadPacket<Packet>(k);
ptranspose(kernel);
pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
@@ -2015,94 +2501,104 @@ template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conj
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
{
typedef typename packet_traits<Scalar>::type Packet;
+ typedef typename unpacket_traits<Packet>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<Packet>::half>::half QuarterPacket;
typedef typename DataMapper::LinearMapper LinearMapper;
- enum { PacketSize = packet_traits<Scalar>::size };
- EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
-};
-
-template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
-EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
- ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
-{
- EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
- EIGEN_UNUSED_VARIABLE(stride);
- EIGEN_UNUSED_VARIABLE(offset);
- eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
- conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
- Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
- Index count = 0;
-
-// if(nr>=8)
-// {
-// for(Index j2=0; j2<packet_cols8; j2+=8)
-// {
-// // skip what we have before
-// if(PanelMode) count += 8 * offset;
-// for(Index k=0; k<depth; k++)
-// {
-// if (PacketSize==8) {
-// Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
-// pstoreu(blockB+count, cj.pconj(A));
-// } else if (PacketSize==4) {
-// Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
-// Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
-// pstoreu(blockB+count, cj.pconj(A));
-// pstoreu(blockB+count+PacketSize, cj.pconj(B));
-// } else {
-// const Scalar* b0 = &rhs[k*rhsStride + j2];
-// blockB[count+0] = cj(b0[0]);
-// blockB[count+1] = cj(b0[1]);
-// blockB[count+2] = cj(b0[2]);
-// blockB[count+3] = cj(b0[3]);
-// blockB[count+4] = cj(b0[4]);
-// blockB[count+5] = cj(b0[5]);
-// blockB[count+6] = cj(b0[6]);
-// blockB[count+7] = cj(b0[7]);
-// }
-// count += 8;
-// }
-// // skip what we have after
-// if(PanelMode) count += 8 * (stride-offset-depth);
-// }
-// }
- if(nr>=4)
+ enum { PacketSize = packet_traits<Scalar>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size};
+ EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0)
{
- for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
+ EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
+ EIGEN_UNUSED_VARIABLE(stride);
+ EIGEN_UNUSED_VARIABLE(offset);
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ const bool HasHalf = (int)HalfPacketSize < (int)PacketSize;
+ const bool HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize;
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
+ Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
+ Index count = 0;
+
+ // if(nr>=8)
+ // {
+ // for(Index j2=0; j2<packet_cols8; j2+=8)
+ // {
+ // // skip what we have before
+ // if(PanelMode) count += 8 * offset;
+ // for(Index k=0; k<depth; k++)
+ // {
+ // if (PacketSize==8) {
+ // Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
+ // pstoreu(blockB+count, cj.pconj(A));
+ // } else if (PacketSize==4) {
+ // Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
+ // Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
+ // pstoreu(blockB+count, cj.pconj(A));
+ // pstoreu(blockB+count+PacketSize, cj.pconj(B));
+ // } else {
+ // const Scalar* b0 = &rhs[k*rhsStride + j2];
+ // blockB[count+0] = cj(b0[0]);
+ // blockB[count+1] = cj(b0[1]);
+ // blockB[count+2] = cj(b0[2]);
+ // blockB[count+3] = cj(b0[3]);
+ // blockB[count+4] = cj(b0[4]);
+ // blockB[count+5] = cj(b0[5]);
+ // blockB[count+6] = cj(b0[6]);
+ // blockB[count+7] = cj(b0[7]);
+ // }
+ // count += 8;
+ // }
+ // // skip what we have after
+ // if(PanelMode) count += 8 * (stride-offset-depth);
+ // }
+ // }
+ if(nr>=4)
{
- // skip what we have before
- if(PanelMode) count += 4 * offset;
- for(Index k=0; k<depth; k++)
+ for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
{
- if (PacketSize==4) {
- Packet A = rhs.loadPacket(k, j2);
- pstoreu(blockB+count, cj.pconj(A));
- count += PacketSize;
- } else {
- const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
- blockB[count+0] = cj(dm0(0));
- blockB[count+1] = cj(dm0(1));
- blockB[count+2] = cj(dm0(2));
- blockB[count+3] = cj(dm0(3));
- count += 4;
+ // skip what we have before
+ if(PanelMode) count += 4 * offset;
+ for(Index k=0; k<depth; k++)
+ {
+ if (PacketSize==4) {
+ Packet A = rhs.template loadPacket<Packet>(k, j2);
+ pstoreu(blockB+count, cj.pconj(A));
+ count += PacketSize;
+ } else if (HasHalf && HalfPacketSize==4) {
+ HalfPacket A = rhs.template loadPacket<HalfPacket>(k, j2);
+ pstoreu(blockB+count, cj.pconj(A));
+ count += HalfPacketSize;
+ } else if (HasQuarter && QuarterPacketSize==4) {
+ QuarterPacket A = rhs.template loadPacket<QuarterPacket>(k, j2);
+ pstoreu(blockB+count, cj.pconj(A));
+ count += QuarterPacketSize;
+ } else {
+ const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
+ blockB[count+0] = cj(dm0(0));
+ blockB[count+1] = cj(dm0(1));
+ blockB[count+2] = cj(dm0(2));
+ blockB[count+3] = cj(dm0(3));
+ count += 4;
+ }
}
+ // skip what we have after
+ if(PanelMode) count += 4 * (stride-offset-depth);
}
- // skip what we have after
- if(PanelMode) count += 4 * (stride-offset-depth);
}
- }
- // copy the remaining columns one at a time (nr==1)
- for(Index j2=packet_cols4; j2<cols; ++j2)
- {
- if(PanelMode) count += offset;
- for(Index k=0; k<depth; k++)
+ // copy the remaining columns one at a time (nr==1)
+ for(Index j2=packet_cols4; j2<cols; ++j2)
{
- blockB[count] = cj(rhs(k, j2));
- count += 1;
+ if(PanelMode) count += offset;
+ for(Index k=0; k<depth; k++)
+ {
+ blockB[count] = cj(rhs(k, j2));
+ count += 1;
+ }
+ if(PanelMode) count += stride-offset-depth;
}
- if(PanelMode) count += stride-offset-depth;
}
-}
+};
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix.h
index ed4d3182b..caa65fccc 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -20,8 +20,9 @@ template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
- typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
-struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride>
+struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
@@ -30,7 +31,7 @@ struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLh
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
- ResScalar* res, Index resStride,
+ ResScalar* res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
@@ -39,8 +40,8 @@ struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLh
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
- ColMajor>
- ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
+ ColMajor,ResInnerStride>
+ ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
}
};
@@ -49,8 +50,9 @@ struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLh
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
- typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
-struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride>
+struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
@@ -59,23 +61,23 @@ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScala
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
- ResScalar* _res, Index resStride,
+ ResScalar* _res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
- typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
- LhsMapper lhs(_lhs,lhsStride);
- RhsMapper rhs(_rhs,rhsStride);
- ResMapper res(_res, resStride);
+ typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
+ LhsMapper lhs(_lhs, lhsStride);
+ RhsMapper rhs(_rhs, rhsStride);
+ ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
- gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
@@ -108,7 +110,7 @@ static void run(Index rows, Index cols, Index depth,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
- info[tid].users += threads;
+ info[tid].users = threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
@@ -146,7 +148,9 @@ static void run(Index rows, Index cols, Index depth,
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
+#if !EIGEN_HAS_CXX11_ATOMIC
#pragma omp atomic
+#endif
info[i].users -= 1;
}
}
@@ -226,7 +230,7 @@ struct gemm_functor
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
- (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
+ (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
@@ -431,10 +435,10 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
- // Note that the condition rhs.rows()>0 was required because lazy produc is (was?) not happy with empty inputs.
+ // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
- lazyproduct::evalTo(dst, lhs, rhs);
+ lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
@@ -446,7 +450,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
- lazyproduct::addTo(dst, lhs, rhs);
+ lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
@@ -455,7 +459,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
- lazyproduct::subTo(dst, lhs, rhs);
+ lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
@@ -467,11 +471,25 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
+ if (dst.cols() == 1)
+ {
+ // Fallback to GEMV if either the lhs or rhs is a runtime vector
+ typename Dest::ColXpr dst_vec(dst.col(0));
+ return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
+ ::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
+ }
+ else if (dst.rows() == 1)
+ {
+ // Fallback to GEMV if either the lhs or rhs is a runtime vector
+ typename Dest::RowXpr dst_vec(dst.row(0));
+ return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
+ ::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
+ }
+
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
- Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
- * RhsBlasTraits::extractScalarFactor(a_rhs);
+ Scalar actualAlpha = combine_scalar_factors(alpha, a_lhs, a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
@@ -482,7 +500,8 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
- (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
+ (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
+ Dest::InnerStrideAtCompileTime>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
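The thread running through this hunk is the new ResInnerStride template parameter and its runtime companion resIncr: the result mapper can now address destinations whose elements are not contiguous within a column (e.g. the real parts of a complex matrix). A minimal sketch of the addressing rule in plain C++, not Eigen's internal mapper types; the struct below is an illustrative stand-in only:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Column-major result mapper with an inner stride: element (i,j)
    // lives at i*resIncr + j*resStride. With resIncr == 1 this is the
    // classic dense layout; the names mirror the patch, but the class
    // is illustrative, not Eigen's blas_data_mapper.
    struct StridedResult {
        double* data;
        std::ptrdiff_t resIncr;   // distance between rows within a column
        std::ptrdiff_t resStride; // distance between column starts

        double& operator()(std::ptrdiff_t i, std::ptrdiff_t j) {
            return data[i * resIncr + j * resStride];
        }
    };

    int main() {
        // 3x2 logical matrix embedded with inner stride 2, e.g. the
        // real parts of an interleaved complex buffer.
        std::vector<double> buf(12, 0.0);
        StridedResult res{buf.data(), /*resIncr=*/2, /*resStride=*/6};
        res(1, 1) = 42.0;
        assert(buf[1 * 2 + 1 * 6] == 42.0);
        return 0;
    }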
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
index e436c50a4..6ba0d9bdb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -25,51 +25,54 @@ namespace internal {
**********************************************************************/
// forward declarations (defined at the end of this file)
-template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int ResInnerStride, int UpLo>
struct tribb_kernel;
/* Optimized matrix-matrix product evaluating only one triangular half */
template <typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
- int ResStorageOrder, int UpLo, int Version = Specialized>
+ int ResStorageOrder, int ResInnerStride, int UpLo, int Version = Specialized>
struct general_matrix_matrix_triangular_product;
// as usual if the result is row major => we transpose the product
template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
- typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo, int Version>
-struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo,Version>
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int UpLo, int Version>
+struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride,UpLo,Version>
{
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
- const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride,
+ const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resIncr, Index resStride,
const ResScalar& alpha, level3_blocking<RhsScalar,LhsScalar>& blocking)
{
general_matrix_matrix_triangular_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
- ColMajor, UpLo==Lower?Upper:Lower>
- ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking);
+ ColMajor, ResInnerStride, UpLo==Lower?Upper:Lower>
+ ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking);
}
};
template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
- typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo, int Version>
-struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Version>
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int UpLo, int Version>
+struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,UpLo,Version>
{
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
- const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride,
+ const RhsScalar* _rhs, Index rhsStride,
+ ResScalar* _res, Index resIncr, Index resStride,
const ResScalar& alpha, level3_blocking<LhsScalar,RhsScalar>& blocking)
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
- typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
+ typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
- ResMapper res(_res, resStride);
+ ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc();
Index mc = (std::min)(size,blocking.mc());
@@ -84,10 +87,10 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
- gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
- tribb_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs, UpLo> sybb;
+ tribb_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs, ResInnerStride, UpLo> sybb;
for(Index k2=0; k2<depth; k2+=kc)
{
@@ -110,8 +113,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
gebp(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc,
(std::min)(size,i2), alpha, -1, -1, 0, 0);
-
- sybb(_res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha);
+ sybb(_res+resStride*i2 + resIncr*i2, resIncr, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha);
if (UpLo==Upper)
{
@@ -133,7 +135,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
// while the triangular block overlapping the diagonal is evaluated into a
// small temporary buffer which is then accumulated into the result using a
// triangular traversal.
-template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int ResInnerStride, int UpLo>
struct tribb_kernel
{
typedef gebp_traits<LhsScalar,RhsScalar,ConjLhs,ConjRhs> Traits;
@@ -142,11 +144,13 @@ struct tribb_kernel
enum {
BlockSize = meta_least_common_multiple<EIGEN_PLAIN_ENUM_MAX(mr,nr),EIGEN_PLAIN_ENUM_MIN(mr,nr)>::ret
};
- void operator()(ResScalar* _res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha)
+ void operator()(ResScalar* _res, Index resIncr, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha)
{
- typedef blas_data_mapper<ResScalar, Index, ColMajor> ResMapper;
- ResMapper res(_res, resStride);
- gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
+ typedef blas_data_mapper<ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
+ typedef blas_data_mapper<ResScalar, Index, ColMajor, Unaligned> BufferMapper;
+ ResMapper res(_res, resStride, resIncr);
+ gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, mr, nr, ConjLhs, ConjRhs> gebp_kernel1;
+ gebp_kernel<LhsScalar, RhsScalar, Index, BufferMapper, mr, nr, ConjLhs, ConjRhs> gebp_kernel2;
Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer((internal::constructor_without_unaligned_array_assert()));
@@ -158,31 +162,32 @@ struct tribb_kernel
const RhsScalar* actual_b = blockB+j*depth;
if(UpLo==Upper)
- gebp_kernel(res.getSubMapper(0, j), blockA, actual_b, j, depth, actualBlockSize, alpha,
- -1, -1, 0, 0);
-
+ gebp_kernel1(res.getSubMapper(0, j), blockA, actual_b, j, depth, actualBlockSize, alpha,
+ -1, -1, 0, 0);
+
// selfadjoint micro block
{
Index i = j;
buffer.setZero();
// 1 - apply the kernel on the temporary buffer
- gebp_kernel(ResMapper(buffer.data(), BlockSize), blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
- -1, -1, 0, 0);
+ gebp_kernel2(BufferMapper(buffer.data(), BlockSize), blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
+ -1, -1, 0, 0);
+
// 2 - triangular accumulation
for(Index j1=0; j1<actualBlockSize; ++j1)
{
- ResScalar* r = &res(i, j + j1);
+ typename ResMapper::LinearMapper r = res.getLinearMapper(i,j+j1);
for(Index i1=UpLo==Lower ? j1 : 0;
UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1)
- r[i1] += buffer(i1,j1);
+ r(i1) += buffer(i1,j1);
}
}
if(UpLo==Lower)
{
Index i = j+actualBlockSize;
- gebp_kernel(res.getSubMapper(i, j), blockA+depth*i, actual_b, size-i,
- depth, actualBlockSize, alpha, -1, -1, 0, 0);
+ gebp_kernel1(res.getSubMapper(i, j), blockA+depth*i, actual_b, size-i,
+ depth, actualBlockSize, alpha, -1, -1, 0, 0);
}
}
}
@@ -286,11 +291,12 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
internal::general_matrix_matrix_triangular_product<Index,
typename Lhs::Scalar, LhsIsRowMajor ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
typename Rhs::Scalar, RhsIsRowMajor ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
- IsRowMajor ? RowMajor : ColMajor, UpLo&(Lower|Upper)>
+ IsRowMajor ? RowMajor : ColMajor, MatrixType::InnerStrideAtCompileTime, UpLo&(Lower|Upper)>
::run(size, depth,
&actualLhs.coeffRef(SkipDiag&&(UpLo&Lower)==Lower ? 1 : 0,0), actualLhs.outerStride(),
&actualRhs.coeffRef(0,SkipDiag&&(UpLo&Upper)==Upper ? 1 : 0), actualRhs.outerStride(),
- mat.data() + (SkipDiag ? (bool(IsRowMajor) != ((UpLo&Lower)==Lower) ? 1 : mat.outerStride() ) : 0), mat.outerStride(), actualAlpha, blocking);
+ mat.data() + (SkipDiag ? (bool(IsRowMajor) != ((UpLo&Lower)==Lower) ? mat.innerStride() : mat.outerStride() ) : 0),
+ mat.innerStride(), mat.outerStride(), actualAlpha, blocking);
}
};
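The split into gebp_kernel1/gebp_kernel2 above exists because the block overlapping the diagonal is first evaluated into a small dense, unit-stride buffer (BufferMapper) and only then folded into the requested triangular half of the possibly strided result. A self-contained sketch of that accumulation step, with Lower hard-coded and all names illustrative:

    #include <cassert>
    #include <vector>

    // Counterpart of tribb_kernel's "2 - triangular accumulation": a
    // bs x bs dense buffer filled by the GEBP kernel is added back into
    // the lower triangle of the result only.
    void accumulate_lower(std::vector<double>& res, int ld,       // result, leading dim
                          const std::vector<double>& buf, int bs, // temp block, bs x bs
                          int i0, int j0)                         // corner in res
    {
        for (int j1 = 0; j1 < bs; ++j1)
            for (int i1 = j1; i1 < bs; ++i1)   // i1 >= j1: lower triangle
                res[(i0 + i1) + (j0 + j1) * ld] += buf[i1 + j1 * bs];
    }

    int main() {
        std::vector<double> res(16, 0.0), buf(4, 1.0); // 4x4 result, 2x2 buffer
        accumulate_lower(res, 4, buf, 2, 0, 0);
        assert(res[0 + 0 * 4] == 1.0); // (0,0) on the diagonal: written
        assert(res[0 + 1 * 4] == 0.0); // (0,1) strictly upper: untouched
        return 0;
    }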
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
index 9176a1382..9a650ec23 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
@@ -37,10 +37,10 @@ namespace Eigen {
namespace internal {
-template <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int  ResStorageOrder, int  UpLo>
+template <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int ResStorageOrder, int UpLo>
struct general_matrix_matrix_rankupdate :
general_matrix_matrix_triangular_product<
- Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {};
+ Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,1,UpLo,BuiltIn> {};
// try to go to BLAS specialization
@@ -48,19 +48,19 @@ struct general_matrix_matrix_rankupdate :
template <typename Index, int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs, int UpLo> \
struct general_matrix_matrix_triangular_product<Index,Scalar,LhsStorageOrder,ConjugateLhs, \
- Scalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Specialized> { \
+ Scalar,RhsStorageOrder,ConjugateRhs,ColMajor,1,UpLo,Specialized> { \
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \
- const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar, Scalar>& blocking) \
+ const Scalar* rhs, Index rhsStride, Scalar* res, Index resIncr, Index resStride, Scalar alpha, level3_blocking<Scalar, Scalar>& blocking) \
{ \
- if ( lhs==rhs && ((UpLo&(Lower|Upper)==UpLo)) ) { \
+ if ( lhs==rhs && ((UpLo&(Lower|Upper))==UpLo) ) { \
general_matrix_matrix_rankupdate<Index,Scalar,LhsStorageOrder,ConjugateLhs,ColMajor,UpLo> \
::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \
} else { \
general_matrix_matrix_triangular_product<Index, \
Scalar, LhsStorageOrder, ConjugateLhs, \
Scalar, RhsStorageOrder, ConjugateRhs, \
- ColMajor, UpLo, BuiltIn> \
- ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \
+ ColMajor, 1, UpLo, BuiltIn> \
+ ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resIncr,resStride,alpha,blocking); \
} \
} \
};
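Buried in the macro above is a genuine bug fix: == binds tighter than &, so the old guard UpLo&(Lower|Upper)==UpLo parsed as UpLo & ((Lower|Upper)==UpLo). A standalone demonstration; the enumerator values are stand-ins, not Eigen's actual constants:

    #include <cassert>

    enum { Lower = 0x1, Upper = 0x2 }; // illustrative values

    int main() {
        const int UpLo = Lower;
        // Buggy form: parsed as UpLo & ((Lower|Upper) == UpLo).
        bool buggy = UpLo & (Lower | Upper) == UpLo;
        // Fixed form from the patch: mask first, then compare.
        bool fixed = (UpLo & (Lower | Upper)) == UpLo;
        assert(fixed);
        assert(!buggy); // (Lower|Upper)==UpLo is false, so the & yields 0
        return 0;
    }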
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h
index b0f6b0d5b..71abf4013 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h
@@ -51,20 +51,22 @@ template< \
typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
-struct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor> \
+struct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor,1> \
{ \
typedef gebp_traits<EIGTYPE,EIGTYPE> Traits; \
\
static void run(Index rows, Index cols, Index depth, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
- EIGTYPE* res, Index resStride, \
+ EIGTYPE* res, Index resIncr, Index resStride, \
EIGTYPE alpha, \
level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/, \
GemmParallelInfo<Index>* /*info = 0*/) \
{ \
using std::conj; \
\
+ EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \
+ eigen_assert(resIncr == 1); \
char transa, transb; \
BlasIndex m, n, k, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
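The new eigen_assert(resIncr == 1) encodes a hard BLAS constraint: xGEMM only exposes a leading dimension ldc for C, i.e. a column stride, so elements within a column of the destination must be contiguous. A sketch of the same guard around a CBLAS call; it assumes a CBLAS implementation (OpenBLAS, MKL, ...) is installed and linked:

    #include <cassert>
    #include <cblas.h>

    // Strided destinations must be rejected before reaching BLAS,
    // exactly as the eigen_assert in the patch does.
    void gemm_colmajor(int m, int n, int k,
                       const double* a, int lda,
                       const double* b, int ldb,
                       double* c, int cIncr, int ldc, double alpha)
    {
        assert(cIncr == 1); // mirrors eigen_assert(resIncr == 1)
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                    m, n, k, alpha, a, lda, b, ldb, /*beta=*/1.0, c, ldc);
    }

    int main() {
        double a[4] = {1, 0, 0, 1}; // A = I, column-major
        double b[4] = {1, 2, 3, 4}; // B = [[1,3],[2,4]]
        double c[4] = {0, 0, 0, 0};
        gemm_colmajor(2, 2, 2, a, 2, b, 2, c, 1, 2, 1.0);
        assert(c[2] == 3.0);        // C = B, element (0,1)
        return 0;
    }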
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixVector.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixVector.h
index 41d8242e1..dfb6aebce 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -14,6 +14,54 @@ namespace Eigen {
namespace internal {
+enum GEMVPacketSizeType {
+ GEMVPacketFull = 0,
+ GEMVPacketHalf,
+ GEMVPacketQuarter
+};
+
+template <int N, typename T1, typename T2, typename T3>
+struct gemv_packet_cond { typedef T3 type; };
+
+template <typename T1, typename T2, typename T3>
+struct gemv_packet_cond<GEMVPacketFull, T1, T2, T3> { typedef T1 type; };
+
+template <typename T1, typename T2, typename T3>
+struct gemv_packet_cond<GEMVPacketHalf, T1, T2, T3> { typedef T2 type; };
+
+template<typename LhsScalar, typename RhsScalar, int _PacketSize=GEMVPacketFull>
+class gemv_traits
+{
+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+#define PACKET_DECL_COND_PREFIX(prefix, name, packet_size) \
+ typedef typename gemv_packet_cond<packet_size, \
+ typename packet_traits<name ## Scalar>::type, \
+ typename packet_traits<name ## Scalar>::half, \
+ typename unpacket_traits<typename packet_traits<name ## Scalar>::half>::half>::type \
+ prefix ## name ## Packet
+
+ PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
+ PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
+#undef PACKET_DECL_COND_PREFIX
+
+public:
+ enum {
+ Vectorizable = unpacket_traits<_LhsPacket>::vectorizable &&
+ unpacket_traits<_RhsPacket>::vectorizable &&
+ int(unpacket_traits<_LhsPacket>::size)==int(unpacket_traits<_RhsPacket>::size),
+ LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
+ RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
+ ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1
+ };
+
+ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+ typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+ typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+};
+
+
/* Optimized col-major matrix * vector product:
* This algorithm processes the matrix per vertical panels,
 * which are then processed horizontally per chunk of 8*PacketSize x 1 vertical segments.
@@ -30,25 +78,25 @@ namespace internal {
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
{
+ typedef gemv_traits<LhsScalar,RhsScalar> Traits;
+ typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketHalf> HalfTraits;
+ typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketQuarter> QuarterTraits;
+
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-enum {
- Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
- && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
-};
+ typedef typename Traits::LhsPacket LhsPacket;
+ typedef typename Traits::RhsPacket RhsPacket;
+ typedef typename Traits::ResPacket ResPacket;
-typedef typename packet_traits<LhsScalar>::type _LhsPacket;
-typedef typename packet_traits<RhsScalar>::type _RhsPacket;
-typedef typename packet_traits<ResScalar>::type _ResPacket;
+ typedef typename HalfTraits::LhsPacket LhsPacketHalf;
+ typedef typename HalfTraits::RhsPacket RhsPacketHalf;
+ typedef typename HalfTraits::ResPacket ResPacketHalf;
-typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef typename QuarterTraits::LhsPacket LhsPacketQuarter;
+ typedef typename QuarterTraits::RhsPacket RhsPacketQuarter;
+ typedef typename QuarterTraits::ResPacket ResPacketQuarter;
-EIGEN_DONT_INLINE static void run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
@@ -57,7 +105,7 @@ EIGEN_DONT_INLINE static void run(
};
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& alhs,
const RhsMapper& rhs,
@@ -73,19 +121,33 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+ conj_helper<LhsPacketHalf,RhsPacketHalf,ConjugateLhs,ConjugateRhs> pcj_half;
+ conj_helper<LhsPacketQuarter,RhsPacketQuarter,ConjugateLhs,ConjugateRhs> pcj_quarter;
+
const Index lhsStride = lhs.stride();
// TODO: for padded aligned inputs, we could enable aligned reads
- enum { LhsAlignment = Unaligned };
+ enum { LhsAlignment = Unaligned,
+ ResPacketSize = Traits::ResPacketSize,
+ ResPacketSizeHalf = HalfTraits::ResPacketSize,
+ ResPacketSizeQuarter = QuarterTraits::ResPacketSize,
+ LhsPacketSize = Traits::LhsPacketSize,
+ HasHalf = (int)ResPacketSizeHalf < (int)ResPacketSize,
+ HasQuarter = (int)ResPacketSizeQuarter < (int)ResPacketSizeHalf
+ };
const Index n8 = rows-8*ResPacketSize+1;
const Index n4 = rows-4*ResPacketSize+1;
const Index n3 = rows-3*ResPacketSize+1;
const Index n2 = rows-2*ResPacketSize+1;
const Index n1 = rows-1*ResPacketSize+1;
+ const Index n_half = rows-1*ResPacketSizeHalf+1;
+ const Index n_quarter = rows-1*ResPacketSizeQuarter+1;
// TODO: improve the following heuristic:
const Index block_cols = cols<128 ? cols : (lhsStride*sizeof(LhsScalar)<32000?16:4);
ResPacket palpha = pset1<ResPacket>(alpha);
+ ResPacketHalf palpha_half = pset1<ResPacketHalf>(alpha);
+ ResPacketQuarter palpha_quarter = pset1<ResPacketQuarter>(alpha);
for(Index j2=0; j2<cols; j2+=block_cols)
{
@@ -190,6 +252,28 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));
i+=ResPacketSize;
}
+ if(HasHalf && i<n_half)
+ {
+ ResPacketHalf c0 = pset1<ResPacketHalf>(ResScalar(0));
+ for(Index j=j2; j<jend; j+=1)
+ {
+ RhsPacketHalf b0 = pset1<RhsPacketHalf>(rhs(j,0));
+ c0 = pcj_half.pmadd(lhs.template load<LhsPacketHalf,LhsAlignment>(i+0,j),b0,c0);
+ }
+ pstoreu(res+i+ResPacketSizeHalf*0, pmadd(c0,palpha_half,ploadu<ResPacketHalf>(res+i+ResPacketSizeHalf*0)));
+ i+=ResPacketSizeHalf;
+ }
+ if(HasQuarter && i<n_quarter)
+ {
+ ResPacketQuarter c0 = pset1<ResPacketQuarter>(ResScalar(0));
+ for(Index j=j2; j<jend; j+=1)
+ {
+ RhsPacketQuarter b0 = pset1<RhsPacketQuarter>(rhs(j,0));
+ c0 = pcj_quarter.pmadd(lhs.template load<LhsPacketQuarter,LhsAlignment>(i+0,j),b0,c0);
+ }
+ pstoreu(res+i+ResPacketSizeQuarter*0, pmadd(c0,palpha_quarter,ploadu<ResPacketQuarter>(res+i+ResPacketSizeQuarter*0)));
+ i+=ResPacketSizeQuarter;
+ }
for(;i<rows;++i)
{
ResScalar c0(0);
@@ -201,7 +285,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
}
/* Optimized row-major matrix * vector product:
- * This algorithm processes 4 rows at onces that allows to both reduce
+ * This algorithm processes 4 rows at once that allows to both reduce
* the number of load/stores of the result by a factor 4 and to reduce
* the instruction dependency. Moreover, we know that all bands have the
* same alignment pattern.
@@ -213,25 +297,25 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
{
-typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-
-enum {
- Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
- && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
- LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
- RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
- ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
-};
+ typedef gemv_traits<LhsScalar,RhsScalar> Traits;
+ typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketHalf> HalfTraits;
+ typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketQuarter> QuarterTraits;
+
+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
-typedef typename packet_traits<LhsScalar>::type _LhsPacket;
-typedef typename packet_traits<RhsScalar>::type _RhsPacket;
-typedef typename packet_traits<ResScalar>::type _ResPacket;
+ typedef typename Traits::LhsPacket LhsPacket;
+ typedef typename Traits::RhsPacket RhsPacket;
+ typedef typename Traits::ResPacket ResPacket;
-typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
-typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
-typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+ typedef typename HalfTraits::LhsPacket LhsPacketHalf;
+ typedef typename HalfTraits::RhsPacket RhsPacketHalf;
+ typedef typename HalfTraits::ResPacket ResPacketHalf;
-EIGEN_DONT_INLINE static void run(
+ typedef typename QuarterTraits::LhsPacket LhsPacketQuarter;
+ typedef typename QuarterTraits::RhsPacket RhsPacketQuarter;
+ typedef typename QuarterTraits::ResPacket ResPacketQuarter;
+
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
@@ -240,7 +324,7 @@ EIGEN_DONT_INLINE static void run(
};
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& alhs,
const RhsMapper& rhs,
@@ -254,6 +338,8 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,R
eigen_internal_assert(rhs.stride()==1);
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+ conj_helper<LhsPacketHalf,RhsPacketHalf,ConjugateLhs,ConjugateRhs> pcj_half;
+ conj_helper<LhsPacketQuarter,RhsPacketQuarter,ConjugateLhs,ConjugateRhs> pcj_quarter;
// TODO: fine tune the following heuristic. The rationale is that if the matrix is very large,
 // processing 8 rows at once might be counterproductive wrt cache.
@@ -262,7 +348,16 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,R
const Index n2 = rows-1;
// TODO: for padded aligned inputs, we could enable aligned reads
- enum { LhsAlignment = Unaligned };
+ enum { LhsAlignment = Unaligned,
+ ResPacketSize = Traits::ResPacketSize,
+ ResPacketSizeHalf = HalfTraits::ResPacketSize,
+ ResPacketSizeQuarter = QuarterTraits::ResPacketSize,
+ LhsPacketSize = Traits::LhsPacketSize,
+ LhsPacketSizeHalf = HalfTraits::LhsPacketSize,
+ LhsPacketSizeQuarter = QuarterTraits::LhsPacketSize,
+ HasHalf = (int)ResPacketSizeHalf < (int)ResPacketSize,
+ HasQuarter = (int)ResPacketSizeQuarter < (int)ResPacketSizeHalf
+ };
Index i=0;
for(; i<n8; i+=8)
@@ -383,6 +478,8 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,R
for(; i<rows; ++i)
{
ResPacket c0 = pset1<ResPacket>(ResScalar(0));
+ ResPacketHalf c0_h = pset1<ResPacketHalf>(ResScalar(0));
+ ResPacketQuarter c0_q = pset1<ResPacketQuarter>(ResScalar(0));
Index j=0;
for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)
{
@@ -390,6 +487,22 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,R
c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i,j),b0,c0);
}
ResScalar cc0 = predux(c0);
+ if (HasHalf) {
+ for(; j+LhsPacketSizeHalf<=cols; j+=LhsPacketSizeHalf)
+ {
+ RhsPacketHalf b0 = rhs.template load<RhsPacketHalf,Unaligned>(j,0);
+ c0_h = pcj_half.pmadd(lhs.template load<LhsPacketHalf,LhsAlignment>(i,j),b0,c0_h);
+ }
+ cc0 += predux(c0_h);
+ }
+ if (HasQuarter) {
+ for(; j+LhsPacketSizeQuarter<=cols; j+=LhsPacketSizeQuarter)
+ {
+ RhsPacketQuarter b0 = rhs.template load<RhsPacketQuarter,Unaligned>(j,0);
+ c0_q = pcj_quarter.pmadd(lhs.template load<LhsPacketQuarter,LhsAlignment>(i,j),b0,c0_q);
+ }
+ cc0 += predux(c0_q);
+ }
for(; j<cols; ++j)
{
cc0 += cj.pmul(lhs(i,j), rhs(j,0));
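The half/quarter packet machinery added throughout this file shrinks the scalar cleanup loop: after the full-width packet loop, at most one half-width and one quarter-width step run before falling back to scalars. A scalar analogue of that cascade, with chunk widths 8/4/2 standing in for ResPacketSize, ResPacketSizeHalf and ResPacketSizeQuarter:

    #include <cassert>
    #include <vector>

    double sum(const std::vector<double>& v) {
        const long n = static_cast<long>(v.size());
        double acc = 0.0;
        long i = 0;
        for (; i + 8 <= n; i += 8)             // "full packet" main loop
            for (long k = 0; k < 8; ++k) acc += v[i + k];
        if (i + 4 <= n) {                      // one "half packet" step
            for (long k = 0; k < 4; ++k) acc += v[i + k];
            i += 4;
        }
        if (i + 2 <= n) {                      // one "quarter packet" step
            for (long k = 0; k < 2; ++k) acc += v[i + k];
            i += 2;
        }
        for (; i < n; ++i) acc += v[i];        // at most one scalar left
        return acc;
    }

    int main() {
        std::vector<double> v(15, 1.0);        // 15 = 8 + 4 + 2 + 1
        assert(sum(v) == 15.0);
        return 0;
    }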
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/Parallelizer.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/Parallelizer.h
index c2f084c82..8f91879e4 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/Parallelizer.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/Parallelizer.h
@@ -10,6 +10,10 @@
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
+#if EIGEN_HAS_CXX11_ATOMIC
+#include <atomic>
+#endif
+
namespace Eigen {
namespace internal {
@@ -17,7 +21,8 @@ namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
- static EIGEN_UNUSED int m_maxThreads = -1;
+ static int m_maxThreads = -1;
+ EIGEN_UNUSED_VARIABLE(m_maxThreads)
if(action==SetAction)
{
@@ -75,8 +80,17 @@ template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
+ // volatile is not enough on all architectures (see bug 1572)
+ // to guarantee that when thread A says to thread B that it is
+ // done with packing a block, then all writes have been really
+ // carried out... C++11 memory model+atomic guarantees this.
+#if EIGEN_HAS_CXX11_ATOMIC
+ std::atomic<Index> sync;
+ std::atomic<int> users;
+#else
Index volatile sync;
int volatile users;
+#endif
Index lhs_start;
Index lhs_length;
@@ -87,11 +101,14 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
-#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
+ // Without C++11, we have to disable GEMM's parallelization on
+ // non x86 architectures because there volatile is not enough for our purpose.
+ // See bug 1572.
+#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64))
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
- // parallelizer mechanism has to be redisigned anyway.
+ // parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
@@ -112,12 +129,12 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
- pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
+ pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) ));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
- // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
+ // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
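The volatile-to-std::atomic switch (bug 1572, cited in the comment above) matters because volatile only stops the compiler from caching the value; it gives no inter-thread ordering guarantee on weakly-ordered CPUs. A minimal sketch of the publish/consume handshake the GEMM packing relies on, with illustrative names:

    #include <atomic>
    #include <cassert>
    #include <thread>

    int packed_block = 0;       // stands in for a packed lhs block
    std::atomic<int> users{0};  // stands in for info[tid].users

    void packer() {
        packed_block = 42;                         // "pack" the block
        users.store(1, std::memory_order_release); // publish it
    }

    void consumer() {
        while (users.load(std::memory_order_acquire) == 0) {} // spin-wait
        assert(packed_block == 42); // visible after the acquire load
    }

    int main() {
        std::thread b(consumer), a(packer);
        a.join(); b.join();
        return 0;
    }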
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index da6f82abc..33ecf10f6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -45,14 +45,23 @@ struct symm_pack_lhs
}
void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
- enum { PacketSize = packet_traits<Scalar>::size };
+ typedef typename unpacket_traits<typename packet_traits<Scalar>::type>::half HalfPacket;
+ typedef typename unpacket_traits<typename unpacket_traits<typename packet_traits<Scalar>::type>::half>::half QuarterPacket;
+ enum { PacketSize = packet_traits<Scalar>::size,
+ HalfPacketSize = unpacket_traits<HalfPacket>::size,
+ QuarterPacketSize = unpacket_traits<QuarterPacket>::size,
+ HasHalf = (int)HalfPacketSize < (int)PacketSize,
+ HasQuarter = (int)QuarterPacketSize < (int)HalfPacketSize};
+
const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
Index count = 0;
//Index peeled_mc3 = (rows/Pack1)*Pack1;
const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
- const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
+ const Index peeled_mc1 = Pack1>=1*PacketSize ? peeled_mc2+((rows-peeled_mc2)/(1*PacketSize))*(1*PacketSize) : 0;
+ const Index peeled_mc_half = Pack1>=HalfPacketSize ? peeled_mc1+((rows-peeled_mc1)/(HalfPacketSize))*(HalfPacketSize) : 0;
+ const Index peeled_mc_quarter = Pack1>=QuarterPacketSize ? peeled_mc_half+((rows-peeled_mc_half)/(QuarterPacketSize))*(QuarterPacketSize) : 0;
if(Pack1>=3*PacketSize)
for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
@@ -66,8 +75,16 @@ struct symm_pack_lhs
for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
pack<1*PacketSize>(blockA, lhs, cols, i, count);
+ if(HasHalf && Pack1>=HalfPacketSize)
+ for(Index i=peeled_mc1; i<peeled_mc_half; i+=HalfPacketSize)
+ pack<HalfPacketSize>(blockA, lhs, cols, i, count);
+
+ if(HasQuarter && Pack1>=QuarterPacketSize)
+ for(Index i=peeled_mc_half; i<peeled_mc_quarter; i+=QuarterPacketSize)
+ pack<QuarterPacketSize>(blockA, lhs, cols, i, count);
+
// do the same with mr==1
- for(Index i=peeled_mc1; i<rows; i++)
+ for(Index i=peeled_mc_quarter; i<rows; i++)
{
for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal
@@ -277,20 +294,21 @@ struct symm_pack_rhs
template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
- int ResStorageOrder>
+ int ResStorageOrder, int ResInnerStride>
struct product_selfadjoint_matrix;
template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
- int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
-struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
+ int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
+ int ResInnerStride>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor,ResInnerStride>
{
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs, Index rhsStride,
- Scalar* res, Index resStride,
+ Scalar* res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
product_selfadjoint_matrix<Scalar, Index,
@@ -298,33 +316,35 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,Co
RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
- ColMajor>
- ::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);
+ ColMajor,ResInnerStride>
+ ::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resIncr, resStride, alpha, blocking);
}
};
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs>
-struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor,ResInnerStride>
{
static EIGEN_DONT_INLINE void run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* res, Index resStride,
+ Scalar* res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs>
-EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor,ResInnerStride>::run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* _res, Index resStride,
+ Scalar* _res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
Index size = rows;
@@ -334,11 +354,11 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, (LhsStorageOrder == RowMajor) ? ColMajor : RowMajor> LhsTransposeMapper;
typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
- typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
+ typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
LhsTransposeMapper lhs_transpose(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
- ResMapper res(_res, resStride);
+ ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
@@ -352,7 +372,7 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
- gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
+ gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
for(Index k2=0; k2<size; k2+=kc)
{
@@ -387,7 +407,7 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
for(Index i2=k2+kc; i2<size; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,size)-i2;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder,false>()
(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
@@ -398,26 +418,28 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
// matrix * selfadjoint product
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs>
-struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor,ResInnerStride>
{
static EIGEN_DONT_INLINE void run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* res, Index resStride,
+ Scalar* res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs>
-EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor,ResInnerStride>::run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* _res, Index resStride,
+ Scalar* _res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
Index size = cols;
@@ -425,9 +447,9 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,f
typedef gebp_traits<Scalar,Scalar> Traits;
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
- typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
+ typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
- ResMapper res(_res,resStride);
+ ResMapper res(_res,resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
@@ -437,7 +459,7 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,f
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
symm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
for(Index k2=0; k2<size; k2+=kc)
@@ -503,12 +525,13 @@ struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,RhsMode,false>
NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
EIGEN_LOGICAL_XOR(RhsIsUpper,internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),
- internal::traits<Dest>::Flags&RowMajorBit ? RowMajor : ColMajor>
+ internal::traits<Dest>::Flags&RowMajorBit ? RowMajor : ColMajor,
+ Dest::InnerStrideAtCompileTime>
::run(
lhs.rows(), rhs.cols(), // sizes
&lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
&rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
- &dst.coeffRef(0,0), dst.outerStride(), // result info
+ &dst.coeffRef(0,0), dst.innerStride(), dst.outerStride(), // result info
actualAlpha, blocking // alpha
);
}
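symm_pack_lhs, extended above with half- and quarter-packet peeling, exists to expand the stored triangular half of a selfadjoint operand into dense panels so the generic GEBP kernel never has to know about symmetry. A sketch of that mirroring for real scalars; conjugation is omitted and the names are illustrative:

    #include <cassert>
    #include <vector>

    // Only the lower triangle of an n x n column-major matrix is valid;
    // packing materializes full rows by mirroring across the diagonal:
    // lhs(i,k) for k <= i, lhs(k,i) for k > i.
    std::vector<double> pack_selfadjoint_rows(const std::vector<double>& lhs, int n)
    {
        std::vector<double> packed;
        packed.reserve(static_cast<size_t>(n) * n);
        for (int i = 0; i < n; ++i)
            for (int k = 0; k < n; ++k)
                packed.push_back(k <= i ? lhs[i + k * n]   // stored half
                                        : lhs[k + i * n]); // mirrored half
        return packed;
    }

    int main() {
        // [[1,5],[5,2]] with only the lower half stored.
        std::vector<double> lhs = {1, 5, /*stale upper*/ -99, 2};
        std::vector<double> p = pack_selfadjoint_rows(lhs, 2);
        assert(p[1] == 5 && p[2] == 5); // (0,1) recovered from (1,0)
        return 0;
    }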
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
index 9a5318507..61396dbdf 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
@@ -44,16 +44,18 @@ namespace internal {
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
-struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor,1> \
{\
\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
- EIGTYPE* res, Index resStride, \
+ EIGTYPE* res, Index resIncr, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
+ EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \
+ eigen_assert(resIncr == 1); \
char side='L', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
@@ -91,15 +93,17 @@ struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLh
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
-struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor,1> \
{\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
- EIGTYPE* res, Index resStride, \
+ EIGTYPE* res, Index resIncr, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
+ EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \
+ eigen_assert(resIncr == 1); \
char side='L', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
@@ -167,16 +171,18 @@ EIGEN_BLAS_HEMM_L(scomplex, float, cf, chemm_)
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
-struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor,1> \
{\
\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
- EIGTYPE* res, Index resStride, \
+ EIGTYPE* res, Index resIncr, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
+ EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \
+ eigen_assert(resIncr == 1); \
char side='R', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
@@ -213,15 +219,17 @@ struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateL
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
-struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor,1> \
{\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
- EIGTYPE* res, Index resStride, \
+ EIGTYPE* res, Index resIncr, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
+ EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \
+ eigen_assert(resIncr == 1); \
char side='R', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
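These *_BLAS.h headers stamp out one specialization per scalar type by expanding a single macro body, as in the EIGEN_BLAS_HEMM_L(scomplex, float, cf, chemm_) instantiation above, which is why the resIncr plumbing appears four times almost verbatim. A toy version of the same pattern, with invented names:

    #include <cassert>

    template <typename Scalar> struct scale_impl;

    // One macro body, instantiated once per scalar type, mirroring how
    // the BLAS specializations are generated. Purely illustrative.
    #define MAKE_SCALE_IMPL(SCALAR, FACTOR)                 \
      template <> struct scale_impl<SCALAR> {               \
        static SCALAR run(SCALAR x) { return x * FACTOR; }  \
      };

    MAKE_SCALE_IMPL(float, 2.0f)
    MAKE_SCALE_IMPL(double, 2.0)
    #undef MAKE_SCALE_IMPL

    int main() {
        assert(scale_impl<float>::run(3.0f) == 6.0f);
        assert(scale_impl<double>::run(3.0) == 6.0);
        return 0;
    }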
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixVector.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixVector.h
index 3fd180e6c..d38fd72b2 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -15,7 +15,7 @@ namespace Eigen {
namespace internal {
/* Optimized selfadjoint matrix * vector product:
- * This algorithm processes 2 columns at onces that allows to both reduce
+ * This algorithm processes 2 columns at once that allows to both reduce
* the number of load/stores of the result by a factor 2 and to reduce
* the instruction dependency.
*/
@@ -27,7 +27,8 @@ template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool Conju
struct selfadjoint_matrix_vector_product
{
-static EIGEN_DONT_INLINE void run(
+static EIGEN_DONT_INLINE EIGEN_DEVICE_FUNC
+void run(
Index size,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs,
@@ -36,7 +37,8 @@ static EIGEN_DONT_INLINE void run(
};
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
+EIGEN_DONT_INLINE EIGEN_DEVICE_FUNC
+void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
Index size,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs,
@@ -62,8 +64,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
-
- Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
+ Index bound = numext::maxi(Index(0), size-8) & 0xfffffffe;
if (FirstTriangular)
bound = size - bound;
@@ -175,7 +176,8 @@ struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
enum { LhsUpLo = LhsMode&(Upper|Lower) };
template<typename Dest>
- static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC
+ void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
{
typedef typename Dest::Scalar ResScalar;
typedef typename Rhs::Scalar RhsScalar;
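The rewritten bound computation clamps size-8 at zero and then clears the low bit, because this kernel consumes two columns per iteration and the main loop must stop on an even index. The same arithmetic sketched with ~1 in place of the 0xfffffffe literal:

    #include <cassert>

    long even_bound(long size) {
        long b = size - 8 > 0 ? size - 8 : 0; // numext::maxi(Index(0), size-8)
        return b & ~1L;                       // round down to an even value
    }

    int main() {
        assert(even_bound(21) == 12); // 13 rounded down to 12
        assert(even_bound(5) == 0);   // clamped at zero
        return 0;
    }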
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointProduct.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointProduct.h
index 39c5b59ff..a21be8050 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointProduct.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointProduct.h
@@ -109,10 +109,10 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
internal::general_matrix_matrix_triangular_product<Index,
Scalar, OtherIsRowMajor ? RowMajor : ColMajor, OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
Scalar, OtherIsRowMajor ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex,
- IsRowMajor ? RowMajor : ColMajor, UpLo>
+ IsRowMajor ? RowMajor : ColMajor, MatrixType::InnerStrideAtCompileTime, UpLo>
::run(size, depth,
- &actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(),
- mat.data(), mat.outerStride(), actualAlpha, blocking);
+ actualOther.data(), actualOther.outerStride(), actualOther.data(), actualOther.outerStride(),
+ mat.data(), mat.innerStride(), mat.outerStride(), actualAlpha, blocking);
}
};
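The selector above now hands the destination's inner stride to the triangular-result GEMM so that rank updates of the form C += alpha*A*A^T write only one triangular half of a possibly strided C. A reference loop with the same semantics (lower half, column-major, contiguous storage; purely illustrative):

    #include <cassert>
    #include <vector>

    // C (n x n) += alpha * A * A^T, touching only i >= j.
    void syrk_lower(std::vector<double>& c, const std::vector<double>& a,
                    int n, int k, double alpha)
    {
        for (int j = 0; j < n; ++j)
            for (int i = j; i < n; ++i) {
                double s = 0.0;
                for (int p = 0; p < k; ++p)
                    s += a[i + p * n] * a[j + p * n];
                c[i + j * n] += alpha * s;
            }
    }

    int main() {
        std::vector<double> c(4, 0.0), a = {1, 2}; // A is a 2x1 column
        syrk_lower(c, a, 2, 1, 1.0);
        assert(c[0] == 1 && c[1] == 2 && c[3] == 4); // lower half of [[1,2],[2,4]]
        assert(c[2] == 0);                           // strictly upper untouched
        return 0;
    }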
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointRank2Update.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointRank2Update.h
index d395888e5..f752a0bf0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointRank2Update.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -24,7 +24,8 @@ struct selfadjoint_rank2_update_selector;
template<typename Scalar, typename Index, typename UType, typename VType>
struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
{
- static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
+ static EIGEN_DEVICE_FUNC
+ void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
{
const Index size = u.size();
for (Index i=0; i<size; ++i)
@@ -79,8 +80,8 @@ EIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,U
if (IsRowMajor)
actualAlpha = numext::conj(actualAlpha);
- typedef typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type UType;
- typedef typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::type>::type VType;
+ typedef typename internal::remove_all<typename internal::conj_expr_if<int(IsRowMajor) ^ int(UBlasTraits::NeedToConjugate), _ActualUType>::type>::type UType;
+ typedef typename internal::remove_all<typename internal::conj_expr_if<int(IsRowMajor) ^ int(VBlasTraits::NeedToConjugate), _ActualVType>::type>::type VType;
internal::selfadjoint_rank2_update_selector<Scalar, Index, UType, VType,
(IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)>
::run(_expression().const_cast_derived().data(),_expression().outerStride(),UType(actualU),VType(actualV),actualAlpha);
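The int(...) casts in the hunk above avoid applying ^ directly to a bool and an enum value, a mix several compilers warn about; the intent is unchanged: conjugate iff exactly one of the two flags is set. A standalone illustration with a stand-in enumerator:

    #include <cassert>

    enum { NeedToConjugate = 1 }; // illustrative stand-in

    int main() {
        const bool IsRowMajor = true;
        bool conj = int(IsRowMajor) ^ int(NeedToConjugate); // 1 ^ 1 == 0
        assert(!conj);
        conj = int(false) ^ int(NeedToConjugate);           // 0 ^ 1 == 1
        assert(conj);
        return 0;
    }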
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix.h
index 539b6c0c6..f0c60507a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -45,22 +45,24 @@ template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs,
- int ResStorageOrder, int Version = Specialized>
+ int ResStorageOrder, int ResInnerStride,
+ int Version = Specialized>
struct product_triangular_matrix_matrix;
template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs, int Version>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder,ConjugateRhs,RowMajor,Version>
+ RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride,Version>
{
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs, Index rhsStride,
- Scalar* res, Index resStride,
+ Scalar* res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
product_triangular_matrix_matrix<Scalar, Index,
@@ -70,18 +72,19 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
ConjugateRhs,
LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
ConjugateLhs,
- ColMajor>
- ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);
+ ColMajor, ResInnerStride>
+ ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resIncr, resStride, alpha, blocking);
}
};
// implements col-major += alpha * op(triangular) * op(general)
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs, int Version>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder,ConjugateRhs,ColMajor,Version>
+ RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>
{
typedef gebp_traits<Scalar,Scalar> Traits;
@@ -95,20 +98,21 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* res, Index resStride,
+ Scalar* res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs, int Version>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+ RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>::run(
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* _res, Index resStride,
+ Scalar* _res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
// strip zeros
@@ -119,10 +123,10 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
- typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
+ typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
- ResMapper res(_res, resStride);
+ ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
@@ -151,7 +155,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
triangularBuffer.diagonal().setOnes();
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
for(Index k2=IsLower ? depth : 0;
@@ -222,7 +226,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
for(Index i2=start; i2<end; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,end)-i2;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder,false>()
(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc,
@@ -235,10 +239,11 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
// implements col-major += alpha * op(general) * op(triangular)
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs, int Version>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder,ConjugateRhs,ColMajor,Version>
+ RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>
{
typedef gebp_traits<Scalar,Scalar> Traits;
enum {
@@ -251,20 +256,21 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* res, Index resStride,
+ Scalar* res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
- int RhsStorageOrder, bool ConjugateRhs, int Version>
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResInnerStride, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+ RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride,Version>::run(
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
- Scalar* _res, Index resStride,
+ Scalar* _res, Index resIncr, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
const Index PacketBytes = packet_traits<Scalar>::size*sizeof(Scalar);
@@ -276,10 +282,10 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
- typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
+ typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
- ResMapper res(_res, resStride);
+ ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
@@ -299,7 +305,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
triangularBuffer.diagonal().setOnes();
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
@@ -400,7 +406,9 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
{
template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)
{
- typedef typename Dest::Scalar Scalar;
+ typedef typename Lhs::Scalar LhsScalar;
+ typedef typename Rhs::Scalar RhsScalar;
+ typedef typename Dest::Scalar Scalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
@@ -412,8 +420,9 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
- Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
- * RhsBlasTraits::extractScalarFactor(a_rhs);
+ LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(a_lhs);
+ RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(a_rhs);
+ Scalar actualAlpha = alpha * lhs_alpha * rhs_alpha;
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;
@@ -430,14 +439,29 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
Mode, LhsIsTriangular,
(internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
(internal::traits<ActualRhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
- (internal::traits<Dest >::Flags&RowMajorBit) ? RowMajor : ColMajor>
+ (internal::traits<Dest >::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>
::run(
stripedRows, stripedCols, stripedDepth, // sizes
&lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
&rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
- &dst.coeffRef(0,0), dst.outerStride(), // result info
+ &dst.coeffRef(0,0), dst.innerStride(), dst.outerStride(), // result info
actualAlpha, blocking
);
+
+ // Apply correction if the diagonal is unit and a scalar factor was nested:
+ if ((Mode&UnitDiag)==UnitDiag)
+ {
+ if (LhsIsTriangular && lhs_alpha!=LhsScalar(1))
+ {
+ Index diagSize = (std::min)(lhs.rows(),lhs.cols());
+ dst.topRows(diagSize) -= ((lhs_alpha-LhsScalar(1))*a_rhs).topRows(diagSize);
+ }
+ else if ((!LhsIsTriangular) && rhs_alpha!=RhsScalar(1))
+ {
+ Index diagSize = (std::min)(rhs.rows(),rhs.cols());
+ dst.leftCols(diagSize) -= (rhs_alpha-RhsScalar(1))*a_lhs.leftCols(diagSize);
+ }
+ }
}
};
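
The new block at the end of run() deserves a word. When the triangular operand carries a nested scalar factor, as in (s*A).triangularView<UnitLower>() * B, blas_traits hoists s out of the product, so the kernel effectively computes s*(strictlyLower(A) + I)*B instead of the desired (s*strictlyLower(A) + I)*B; the two differ by (s-1)*B on the first min(rows,cols) rows, which is exactly what the correction subtracts (the matrix-vector path below receives the same fix). A minimal sketch that checks the identity through the public API, assuming the fix is in place:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 3);
  double s = 3.0;
  // Product with a scalar factor nested inside the triangular view.
  Eigen::MatrixXd X = (s * A).triangularView<Eigen::UnitLower>() * B;
  // Reference: materialize the unit-lower matrix first, then multiply.
  Eigen::MatrixXd U = (s * A).eval().triangularView<Eigen::UnitLower>();
  std::cout << (X - U * B).norm() << "\n"; // ~0 with the correction applied
}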
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
index a25197ab0..a98d12e4a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
@@ -46,7 +46,7 @@ template <typename Scalar, typename Index,
struct product_triangular_matrix_matrix_trmm :
product_triangular_matrix_matrix<Scalar,Index,Mode,
LhsIsTriangular,LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder, ConjugateRhs, ResStorageOrder, BuiltIn> {};
+ RhsStorageOrder, ConjugateRhs, ResStorageOrder, 1, BuiltIn> {};
// try to go to BLAS specialization
@@ -55,13 +55,15 @@ template <typename Index, int Mode, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \
- LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \
+ LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,1,Specialized> { \
static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\
- const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \
+ const Scalar* _rhs, Index rhsStride, Scalar* res, Index resIncr, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \
+ EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \
+ eigen_assert(resIncr == 1); \
product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \
LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \
RhsStorageOrder, ConjugateRhs, ColMajor>::run( \
- _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
} \
};
@@ -115,8 +117,8 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from BLAS */ \
product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \
- LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
- _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
+ LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, 1, BuiltIn>::run( \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, 1, resStride, alpha, blocking); \
/*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \
} else { \
/* Make sense to call GEMM */ \
@@ -124,8 +126,8 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \
BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \
gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \
- general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
- rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \
+ general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor,1>::run( \
+ rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, 1, resStride, alpha, gemm_blocking, 0); \
\
/*std::cout << "TRMM_L: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \
} \
@@ -232,8 +234,8 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from BLAS*/ \
product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \
- LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
- _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
+ LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, 1, BuiltIn>::run( \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, 1, resStride, alpha, blocking); \
/*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \
} else { \
/* Make sense to call GEMM */ \
@@ -241,8 +243,8 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \
BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \
gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \
- general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
- rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \
+ general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor,1>::run( \
+ rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, 1, resStride, alpha, gemm_blocking, 0); \
\
/*std::cout << "TRMM_R: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \
} \
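
Note the shape of the dispatch in this file: the BLAS-backed specializations are only declared for a compile-time result inner stride of 1, since BLAS routines have no notion of a row increment for the result, and they assert resIncr == 1 at runtime; any other stride falls through to the generic Eigen kernel. A stripped-down model of the pattern (kernel and its parameters are hypothetical names, not part of the patch):

#include <cassert>
#include <cstdio>

template <int ResInnerStride>
struct kernel {                       // generic path: any inner stride
  static void run(double*, long incr, long) {
    std::printf("generic kernel, incr=%ld\n", incr);
  }
};

template <>
struct kernel<1> {                    // "BLAS" path: contiguous columns only
  static void run(double*, long incr, long) {
    assert(incr == 1);                // mirrors eigen_assert(resIncr == 1)
    std::printf("fast contiguous kernel\n");
  }
};

int main() {
  double buf[16] = {};
  kernel<1>::run(buf, 1, 4);          // selects the specialized path
  kernel<2>::run(buf, 2, 4);          // falls back to the generic one
}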
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixVector.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixVector.h
index 4b292e74d..76bfa159c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -221,8 +221,9 @@ template<int Mode> struct trmv_selector<Mode,ColMajor>
typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
- ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
- * RhsBlasTraits::extractScalarFactor(rhs);
+ LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(lhs);
+ RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(rhs);
+ ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha;
enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
@@ -274,6 +275,12 @@ template<int Mode> struct trmv_selector<Mode,ColMajor>
else
dest = MappedDest(actualDestPtr, dest.size());
}
+
+ if ( ((Mode&UnitDiag)==UnitDiag) && (lhs_alpha!=LhsScalar(1)) )
+ {
+ Index diagSize = (std::min)(lhs.rows(),lhs.cols());
+ dest.head(diagSize) -= (lhs_alpha-LhsScalar(1))*rhs.head(diagSize);
+ }
}
};
@@ -295,8 +302,9 @@ template<int Mode> struct trmv_selector<Mode,RowMajor>
typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
- ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
- * RhsBlasTraits::extractScalarFactor(rhs);
+ LhsScalar lhs_alpha = LhsBlasTraits::extractScalarFactor(lhs);
+ RhsScalar rhs_alpha = RhsBlasTraits::extractScalarFactor(rhs);
+ ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha;
enum {
DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1
@@ -326,6 +334,12 @@ template<int Mode> struct trmv_selector<Mode,RowMajor>
actualRhsPtr,1,
dest.data(),dest.innerStride(),
actualAlpha);
+
+ if ( ((Mode&UnitDiag)==UnitDiag) && (lhs_alpha!=LhsScalar(1)) )
+ {
+ Index diagSize = (std::min)(lhs.rows(),lhs.cols());
+ dest.head(diagSize) -= (lhs_alpha-LhsScalar(1))*rhs.head(diagSize);
+ }
}
};
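
The two dest.head(diagSize) corrections here are the matrix-vector counterpart of the unit-diagonal fix described above for TriangularMatrixMatrix.h. A minimal check through the public API, assuming the fix is applied:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::VectorXd x = Eigen::VectorXd::Random(4);
  // Scalar factor nested in the triangular operand of a trmv.
  Eigen::VectorXd y = (2.0 * A).triangularView<Eigen::UnitUpper>() * x;
  Eigen::MatrixXd U = (2.0 * A).eval().triangularView<Eigen::UnitUpper>();
  std::cout << (y - U * x).norm() << "\n"; // ~0
}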
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix.h
index 223c38b86..6d879ba00 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -15,48 +15,48 @@ namespace Eigen {
namespace internal {
// if the rhs is row major, let's transpose the product
-template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
-struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
+struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor,OtherInnerStride>
{
static void run(
Index size, Index cols,
const Scalar* tri, Index triStride,
- Scalar* _other, Index otherStride,
+ Scalar* _other, Index otherIncr, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
triangular_solve_matrix<
Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,
(Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),
NumTraits<Scalar>::IsComplex && Conjugate,
- TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor>
- ::run(size, cols, tri, triStride, _other, otherStride, blocking);
+ TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor, OtherInnerStride>
+ ::run(size, cols, tri, triStride, _other, otherIncr, otherStride, blocking);
}
};
/* Optimized triangular solver with multiple right hand side and the triangular matrix on the left
*/
-template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
-struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder,int OtherInnerStride>
+struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor,OtherInnerStride>
{
static EIGEN_DONT_INLINE void run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
- Scalar* _other, Index otherStride,
+ Scalar* _other, Index otherIncr, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking);
};
-template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
-EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor,OtherInnerStride>::run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
- Scalar* _other, Index otherStride,
+ Scalar* _other, Index otherIncr, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
Index cols = otherSize;
typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> TriMapper;
- typedef blas_data_mapper<Scalar, Index, ColMajor> OtherMapper;
+ typedef blas_data_mapper<Scalar, Index, ColMajor, Unaligned, OtherInnerStride> OtherMapper;
TriMapper tri(_tri, triStride);
- OtherMapper other(_other, otherStride);
+ OtherMapper other(_other, otherStride, otherIncr);
typedef gebp_traits<Scalar,Scalar> Traits;
@@ -76,7 +76,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
conj_if<Conjugate> conj;
gebp_kernel<Scalar, Scalar, Index, OtherMapper, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
- gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, TriStorageOrder> pack_lhs;
+ gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, TriStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, OtherMapper, Traits::nr, ColMajor, false, true> pack_rhs;
// the goal here is to subdivise the Rhs panels such that we keep some cache
@@ -128,19 +128,21 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
{
Scalar b(0);
const Scalar* l = &tri(i,s);
- Scalar* r = &other(s,j);
+ typename OtherMapper::LinearMapper r = other.getLinearMapper(s,j);
for (Index i3=0; i3<k; ++i3)
- b += conj(l[i3]) * r[i3];
+ b += conj(l[i3]) * r(i3);
other(i,j) = (other(i,j) - b)*a;
}
else
{
- Scalar b = (other(i,j) *= a);
- Scalar* r = &other(s,j);
- const Scalar* l = &tri(s,i);
+ Scalar& otherij = other(i,j);
+ otherij *= a;
+ Scalar b = otherij;
+ typename OtherMapper::LinearMapper r = other.getLinearMapper(s,j);
+ typename TriMapper::LinearMapper l = tri.getLinearMapper(s,i);
for (Index i3=0;i3<rs;++i3)
- r[i3] -= b * conj(l[i3]);
+ r(i3) -= b * conj(l(i3));
}
}
}
@@ -185,28 +187,28 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
*/
-template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
-struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
+struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor,OtherInnerStride>
{
static EIGEN_DONT_INLINE void run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
- Scalar* _other, Index otherStride,
+ Scalar* _other, Index otherIncr, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking);
};
-template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
-EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor,OtherInnerStride>::run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
- Scalar* _other, Index otherStride,
+ Scalar* _other, Index otherIncr, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
Index rows = otherSize;
typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef blas_data_mapper<Scalar, Index, ColMajor> LhsMapper;
+ typedef blas_data_mapper<Scalar, Index, ColMajor, Unaligned, OtherInnerStride> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> RhsMapper;
- LhsMapper lhs(_other, otherStride);
+ LhsMapper lhs(_other, otherStride, otherIncr);
RhsMapper rhs(_tri, triStride);
typedef gebp_traits<Scalar,Scalar> Traits;
@@ -229,7 +231,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
gebp_kernel<Scalar, Scalar, Index, LhsMapper, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder,false,true> pack_rhs_panel;
- gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, ColMajor, false, true> pack_lhs_panel;
+ gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor, false, true> pack_lhs_panel;
for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size;
@@ -297,24 +299,24 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
{
Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
- Scalar* r = &lhs(i2,j);
+ typename LhsMapper::LinearMapper r = lhs.getLinearMapper(i2,j);
for (Index k3=0; k3<k; ++k3)
{
Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));
- Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);
+ typename LhsMapper::LinearMapper a = lhs.getLinearMapper(i2,IsLower ? j+1+k3 : absolute_j2+k3);
for (Index i=0; i<actual_mc; ++i)
- r[i] -= a[i] * b;
+ r(i) -= a(i) * b;
}
if((Mode & UnitDiag)==0)
{
Scalar inv_rjj = RealScalar(1)/conj(rhs(j,j));
for (Index i=0; i<actual_mc; ++i)
- r[i] *= inv_rjj;
+ r(i) *= inv_rjj;
}
}
// pack the just computed part of lhs to A
- pack_lhs_panel(blockA, LhsMapper(_other+absolute_j2*otherStride+i2, otherStride),
+ pack_lhs_panel(blockA, lhs.getSubMapper(i2,absolute_j2),
actualPanelWidth, actual_mc,
actual_kc, j2);
}
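
All the pointer arithmetic replaced by getLinearMapper() above exists so that the right-hand side may have a non-unit inner stride (otherIncr). A short sketch of what that enables at the API level, solving in place into a strided Map and assuming a version of Eigen that carries this patch:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd L = Eigen::MatrixXd::Random(5, 5);
  L.diagonal().array() += 5.0;                  // keep it well conditioned
  double buf[40] = {};
  // A 5x3 view of buf whose rows are 2 apart and columns 12 apart,
  // i.e. inner stride 2 -- the new OtherInnerStride case.
  Eigen::Map<Eigen::MatrixXd, 0, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> >
      X(buf, 5, 3, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>(12, 2));
  X = Eigen::MatrixXd::Random(5, 3);
  Eigen::MatrixXd B0 = X;                       // keep the original rhs
  L.triangularView<Eigen::Lower>().solveInPlace(X);
  std::cout << (L.triangularView<Eigen::Lower>().toDenseMatrix() * X - B0).norm()
            << "\n"; // ~0
}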
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
index f0775116a..621194ce6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
@@ -40,7 +40,7 @@ namespace internal {
// implements LeftSide op(triangular)^-1 * general
#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASFUNC) \
template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
-struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> \
+struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor,1> \
{ \
enum { \
IsLower = (Mode&Lower) == Lower, \
@@ -51,8 +51,10 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorage
static void run( \
Index size, Index otherSize, \
const EIGTYPE* _tri, Index triStride, \
- EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
+ EIGTYPE* _other, Index otherIncr, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
{ \
+ EIGEN_ONLY_USED_FOR_DEBUG(otherIncr); \
+ eigen_assert(otherIncr == 1); \
BlasIndex m = convert_index<BlasIndex>(size), n = convert_index<BlasIndex>(otherSize), lda, ldb; \
char side = 'L', uplo, diag='N', transa; \
/* Set alpha_ */ \
@@ -99,7 +101,7 @@ EIGEN_BLAS_TRSM_L(scomplex, float, ctrsm_)
// implements RightSide general * op(triangular)^-1
#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASFUNC) \
template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
-struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> \
+struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor,1> \
{ \
enum { \
IsLower = (Mode&Lower) == Lower, \
@@ -110,8 +112,10 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorag
static void run( \
Index size, Index otherSize, \
const EIGTYPE* _tri, Index triStride, \
- EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
+ EIGTYPE* _other, Index otherIncr, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
{ \
+ EIGEN_ONLY_USED_FOR_DEBUG(otherIncr); \
+ eigen_assert(otherIncr == 1); \
BlasIndex m = convert_index<BlasIndex>(otherSize), n = convert_index<BlasIndex>(size), lda, ldb; \
char side = 'R', uplo, diag='N', transa; \
/* Set alpha_ */ \
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverVector.h b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverVector.h
index b994759b2..647317016 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/products/TriangularSolverVector.h
@@ -58,7 +58,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
- // 2 - it is slighlty faster at runtime
+ // 2 - it is slightly faster at runtime
Index startRow = IsLower ? pi : pi-actualPanelWidth;
Index startCol = IsLower ? 0 : pi;
@@ -77,7 +77,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
if (k>0)
rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();
- if(!(Mode & UnitDiag))
+ if((!(Mode & UnitDiag)) && numext::not_equal_strict(rhs[i],RhsScalar(0)))
rhs[i] /= cjLhs(i,i);
}
}
@@ -114,20 +114,23 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
for(Index k=0; k<actualPanelWidth; ++k)
{
Index i = IsLower ? pi+k : pi-k-1;
- if(!(Mode & UnitDiag))
- rhs[i] /= cjLhs.coeff(i,i);
-
- Index r = actualPanelWidth - k - 1; // remaining size
- Index s = IsLower ? i+1 : i-r;
- if (r>0)
- Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
+ if(numext::not_equal_strict(rhs[i],RhsScalar(0)))
+ {
+ if(!(Mode & UnitDiag))
+ rhs[i] /= cjLhs.coeff(i,i);
+
+ Index r = actualPanelWidth - k - 1; // remaining size
+ Index s = IsLower ? i+1 : i-r;
+ if (r>0)
+ Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
+ }
}
Index r = IsLower ? size - endBlock : startBlock; // remaining size
if (r > 0)
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
- // 2 - it is slighlty faster at runtime
+ // 2 - it is slightly faster at runtime
general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,Conjugate,RhsScalar,RhsMapper,false>::run(
r, actualPanelWidth,
LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride),
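
The numext::not_equal_strict guards added in this file implement a cheap sparsity shortcut: in forward or backward substitution, an entry of the right-hand side that is exactly zero contributes nothing to later rows, so both its division by the diagonal and the trailing column update can be skipped. A self-contained sketch of the same idea on a plain array (lower_solve is an illustrative name, not Eigen code):

#include <cstdio>

// Solve L*x = b in place for a dense lower-triangular L stored
// column-major, mirroring the structure of triangular_solve_vector.
void lower_solve(const double* L, int n, double* x) {
  for (int i = 0; i < n; ++i) {
    if (x[i] != 0.0) {            // the new "skip exact zeros" guard
      x[i] /= L[i + i * n];
      for (int r = i + 1; r < n; ++r)
        x[r] -= x[i] * L[r + i * n];
    }
  }
}

int main() {
  double L[9] = {2, 1, 1,  0, 3, 1,  0, 0, 4}; // col-major 3x3
  double b[3] = {0.0, 6.0, 10.0};              // b[0]==0: row 0 is skipped
  lower_solve(L, 3, b);
  std::printf("%g %g %g\n", b[0], b[1], b[2]); // 0 2 2
}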
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/BlasUtil.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/BlasUtil.h
index b1791fb3a..e16a56498 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/BlasUtil.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/BlasUtil.h
@@ -24,14 +24,14 @@ struct gebp_kernel;
template<typename Scalar, typename Index, typename DataMapper, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>
struct gemm_pack_rhs;
-template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, typename Packet, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct gemm_pack_lhs;
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
- int ResStorageOrder>
+ int ResStorageOrder, int ResInnerStride>
struct general_matrix_matrix_product;
template<typename Index,
@@ -39,90 +39,6 @@ template<typename Index,
typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version=Specialized>
struct general_matrix_vector_product;
-
-template<bool Conjugate> struct conj_if;
-
-template<> struct conj_if<true> {
- template<typename T>
- inline T operator()(const T& x) const { return numext::conj(x); }
- template<typename T>
- inline T pconj(const T& x) const { return internal::pconj(x); }
-};
-
-template<> struct conj_if<false> {
- template<typename T>
- inline const T& operator()(const T& x) const { return x; }
- template<typename T>
- inline const T& pconj(const T& x) const { return x; }
-};
-
-// Generic implementation for custom complex types.
-template<typename LhsScalar, typename RhsScalar, bool ConjLhs, bool ConjRhs>
-struct conj_helper
-{
- typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar>::ReturnType Scalar;
-
- EIGEN_STRONG_INLINE Scalar pmadd(const LhsScalar& x, const RhsScalar& y, const Scalar& c) const
- { return padd(c, pmul(x,y)); }
-
- EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar& x, const RhsScalar& y) const
- { return conj_if<ConjLhs>()(x) * conj_if<ConjRhs>()(y); }
-};
-
-template<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>
-{
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); }
-};
-
-template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, false,true>
-{
- typedef std::complex<RealScalar> Scalar;
- EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
- { return c + pmul(x,y); }
-
- EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
- { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }
-};
-
-template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
-{
- typedef std::complex<RealScalar> Scalar;
- EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
- { return c + pmul(x,y); }
-
- EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
- { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
-};
-
-template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
-{
- typedef std::complex<RealScalar> Scalar;
- EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
- { return c + pmul(x,y); }
-
- EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
- { return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
-};
-
-template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
-{
- typedef std::complex<RealScalar> Scalar;
- EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const
- { return padd(c, pmul(x,y)); }
- EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const
- { return conj_if<Conj>()(x)*y; }
-};
-
-template<typename RealScalar,bool Conj> struct conj_helper<RealScalar, std::complex<RealScalar>, false,Conj>
-{
- typedef std::complex<RealScalar> Scalar;
- EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const
- { return padd(c, pmul(x,y)); }
- EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const
- { return x*conj_if<Conj>()(y); }
-};
-
template<typename From,typename To> struct get_factor {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE To run(const From& x) { return To(x); }
};
@@ -155,13 +71,19 @@ class BlasVectorMapper {
Scalar* m_data;
};
-template<typename Scalar, typename Index, int AlignmentType>
-class BlasLinearMapper {
- public:
- typedef typename packet_traits<Scalar>::type Packet;
- typedef typename packet_traits<Scalar>::half HalfPacket;
+template<typename Scalar, typename Index, int AlignmentType, int Incr=1>
+class BlasLinearMapper;
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}
+template<typename Scalar, typename Index, int AlignmentType>
+class BlasLinearMapper<Scalar,Index,AlignmentType>
+{
+public:
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data, Index incr=1)
+ : m_data(data)
+ {
+ EIGEN_ONLY_USED_FOR_DEBUG(incr);
+ eigen_assert(incr==1);
+ }
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const {
internal::prefetch(&operator()(i));
@@ -171,33 +93,86 @@ class BlasLinearMapper {
return m_data[i];
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
- return ploadt<Packet, AlignmentType>(m_data + i);
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i) const {
+ return ploadt<PacketType, AlignmentType>(m_data + i);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
- return ploadt<HalfPacket, AlignmentType>(m_data + i);
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketType &p) const {
+ pstoret<Scalar, PacketType, AlignmentType>(m_data + i, p);
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet &p) const {
- pstoret<Scalar, Packet, AlignmentType>(m_data + i, p);
- }
-
- protected:
+protected:
Scalar *m_data;
};
// Lightweight helper class to access matrix coefficients.
-template<typename Scalar, typename Index, int StorageOrder, int AlignmentType = Unaligned>
-class blas_data_mapper {
- public:
- typedef typename packet_traits<Scalar>::type Packet;
- typedef typename packet_traits<Scalar>::half HalfPacket;
+template<typename Scalar, typename Index, int StorageOrder, int AlignmentType = Unaligned, int Incr = 1>
+class blas_data_mapper;
+
+// TMP to help PacketBlock store implementation.
+// There's currently no known use case for PacketBlock load.
+// The default implementation assumes ColMajor order.
+// It always stores each packet sequentially, one `stride` apart.
+template<typename Index, typename Scalar, typename Packet, int n, int idx, int StorageOrder>
+struct PacketBlockManagement
+{
+ PacketBlockManagement<Index, Scalar, Packet, n, idx - 1, StorageOrder> pbm;
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock<Packet, n> &block) const {
+ pbm.store(to, stride, i, j, block);
+ pstoreu<Scalar>(to + i + (j + idx)*stride, block.packet[idx]);
+ }
+};
+// PacketBlockManagement specialization to take care of RowMajor order without ifs.
+template<typename Index, typename Scalar, typename Packet, int n, int idx>
+struct PacketBlockManagement<Index, Scalar, Packet, n, idx, RowMajor>
+{
+ PacketBlockManagement<Index, Scalar, Packet, n, idx - 1, RowMajor> pbm;
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock<Packet, n> &block) const {
+ pbm.store(to, stride, i, j, block);
+ pstoreu<Scalar>(to + j + (i + idx)*stride, block.packet[idx]);
+ }
+};
+
+template<typename Index, typename Scalar, typename Packet, int n, int StorageOrder>
+struct PacketBlockManagement<Index, Scalar, Packet, n, -1, StorageOrder>
+{
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock<Packet, n> &block) const {
+ EIGEN_UNUSED_VARIABLE(to);
+ EIGEN_UNUSED_VARIABLE(stride);
+ EIGEN_UNUSED_VARIABLE(i);
+ EIGEN_UNUSED_VARIABLE(j);
+ EIGEN_UNUSED_VARIABLE(block);
+ }
+};
+
+template<typename Index, typename Scalar, typename Packet, int n>
+struct PacketBlockManagement<Index, Scalar, Packet, n, -1, RowMajor>
+{
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock<Packet, n> &block) const {
+ EIGEN_UNUSED_VARIABLE(to);
+ EIGEN_UNUSED_VARIABLE(stride);
+ EIGEN_UNUSED_VARIABLE(i);
+ EIGEN_UNUSED_VARIABLE(j);
+ EIGEN_UNUSED_VARIABLE(block);
+ }
+};
+
+template<typename Scalar, typename Index, int StorageOrder, int AlignmentType>
+class blas_data_mapper<Scalar,Index,StorageOrder,AlignmentType,1>
+{
+public:
typedef BlasLinearMapper<Scalar, Index, AlignmentType> LinearMapper;
typedef BlasVectorMapper<Scalar, Index> VectorMapper;
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride, Index incr=1)
+ : m_data(data), m_stride(stride)
+ {
+ EIGEN_ONLY_USED_FOR_DEBUG(incr);
+ eigen_assert(incr==1);
+ }
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>
getSubMapper(Index i, Index j) const {
@@ -218,8 +193,9 @@ class blas_data_mapper {
return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride];
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
- return ploadt<Packet, AlignmentType>(&operator()(i, j));
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i, Index j) const {
+ return ploadt<PacketType, AlignmentType>(&operator()(i, j));
}
template <typename PacketT, int AlignmentT>
@@ -227,10 +203,6 @@ class blas_data_mapper {
return ploadt<PacketT, AlignmentT>(&operator()(i, j));
}
- EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
- return ploadt<HalfPacket, AlignmentType>(&operator()(i, j));
- }
-
template<typename SubPacket>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const {
pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);
@@ -251,11 +223,167 @@ class blas_data_mapper {
return internal::first_default_aligned(m_data, size);
}
- protected:
+ template<typename SubPacket, int n>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacketBlock(Index i, Index j, const PacketBlock<SubPacket, n> &block) const {
+ PacketBlockManagement<Index, Scalar, SubPacket, n, n-1, StorageOrder> pbm;
+ pbm.store(m_data, m_stride, i, j, block);
+ }
+protected:
Scalar* EIGEN_RESTRICT m_data;
const Index m_stride;
};
+// Implementation of non-natural increment (i.e. inner-stride != 1)
+// The exposed API is not complete yet compared to the Incr==1 case
+// because some features make less sense in this case.
+template<typename Scalar, typename Index, int AlignmentType, int Incr>
+class BlasLinearMapper
+{
+public:
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data,Index incr) : m_data(data), m_incr(incr) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const {
+ internal::prefetch(&operator()(i));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const {
+ return m_data[i*m_incr.value()];
+ }
+
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i) const {
+ return pgather<Scalar,PacketType>(m_data + i*m_incr.value(), m_incr.value());
+ }
+
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketType &p) const {
+ pscatter<Scalar, PacketType>(m_data + i*m_incr.value(), p, m_incr.value());
+ }
+
+protected:
+ Scalar *m_data;
+ const internal::variable_if_dynamic<Index,Incr> m_incr;
+};
+
+template<typename Scalar, typename Index, int StorageOrder, int AlignmentType,int Incr>
+class blas_data_mapper
+{
+public:
+ typedef BlasLinearMapper<Scalar, Index, AlignmentType,Incr> LinearMapper;
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride, Index incr) : m_data(data), m_stride(stride), m_incr(incr) {}
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper
+ getSubMapper(Index i, Index j) const {
+ return blas_data_mapper(&operator()(i, j), m_stride, m_incr.value());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
+ return LinearMapper(&operator()(i, j), m_incr.value());
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const {
+ return m_data[StorageOrder==RowMajor ? j*m_incr.value() + i*m_stride : i*m_incr.value() + j*m_stride];
+ }
+
+ template<typename PacketType>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i, Index j) const {
+ return pgather<Scalar,PacketType>(&operator()(i, j),m_incr.value());
+ }
+
+ template <typename PacketT, int AlignmentT>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i, Index j) const {
+ return pgather<Scalar,PacketT>(&operator()(i, j),m_incr.value());
+ }
+
+ template<typename SubPacket>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const {
+ pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);
+ }
+
+ template<typename SubPacket>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const {
+ return pgather<Scalar, SubPacket>(&operator()(i, j), m_stride);
+ }
+
+ // storePacketBlock_helper defines a way to access values inside the PacketBlock; this is essentially required by the complex types.
+ template<typename SubPacket, typename ScalarT, int n, int idx>
+ struct storePacketBlock_helper
+ {
+ storePacketBlock_helper<SubPacket, ScalarT, n, idx-1> spbh;
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType, Incr>* sup, Index i, Index j, const PacketBlock<SubPacket, n>& block) const {
+ spbh.store(sup, i,j,block);
+ for(int l = 0; l < unpacket_traits<SubPacket>::size; l++)
+ {
+ ScalarT *v = &sup->operator()(i+l, j+idx);
+ *v = block.packet[idx][l];
+ }
+ }
+ };
+
+ template<typename SubPacket, int n, int idx>
+ struct storePacketBlock_helper<SubPacket, std::complex<float>, n, idx>
+ {
+ storePacketBlock_helper<SubPacket, std::complex<float>, n, idx-1> spbh;
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType, Incr>* sup, Index i, Index j, const PacketBlock<SubPacket, n>& block) const {
+ spbh.store(sup,i,j,block);
+ for(int l = 0; l < unpacket_traits<SubPacket>::size; l++)
+ {
+ std::complex<float> *v = &sup->operator()(i+l, j+idx);
+ v->real(block.packet[idx].v[2*l+0]);
+ v->imag(block.packet[idx].v[2*l+1]);
+ }
+ }
+ };
+
+ template<typename SubPacket, int n, int idx>
+ struct storePacketBlock_helper<SubPacket, std::complex<double>, n, idx>
+ {
+ storePacketBlock_helper<SubPacket, std::complex<double>, n, idx-1> spbh;
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType, Incr>* sup, Index i, Index j, const PacketBlock<SubPacket, n>& block) const {
+ spbh.store(sup,i,j,block);
+ for(int l = 0; l < unpacket_traits<SubPacket>::size; l++)
+ {
+ std::complex<double> *v = &sup->operator()(i+l, j+idx);
+ v->real(block.packet[idx].v[2*l+0]);
+ v->imag(block.packet[idx].v[2*l+1]);
+ }
+ }
+ };
+
+ template<typename SubPacket, typename ScalarT, int n>
+ struct storePacketBlock_helper<SubPacket, ScalarT, n, -1>
+ {
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType, Incr>*, Index, Index, const PacketBlock<SubPacket, n>& ) const {
+ }
+ };
+
+ template<typename SubPacket, int n>
+ struct storePacketBlock_helper<SubPacket, std::complex<float>, n, -1>
+ {
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType, Incr>*, Index, Index, const PacketBlock<SubPacket, n>& ) const {
+ }
+ };
+
+ template<typename SubPacket, int n>
+ struct storePacketBlock_helper<SubPacket, std::complex<double>, n, -1>
+ {
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType, Incr>*, Index, Index, const PacketBlock<SubPacket, n>& ) const {
+ }
+ };
+ // This function stores a PacketBlock on m_data; this approach is quite slow compared to Incr==1 and should be avoided when possible.
+ template<typename SubPacket, int n>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacketBlock(Index i, Index j, const PacketBlock<SubPacket, n>&block) const {
+ storePacketBlock_helper<SubPacket, Scalar, n, n-1> spb;
+ spb.store(this, i,j,block);
+ }
+protected:
+ Scalar* EIGEN_RESTRICT m_data;
+ const Index m_stride;
+ const internal::variable_if_dynamic<Index,Incr> m_incr;
+};
+
// lightweight helper class to access matrix coefficients (const version)
template<typename Scalar, typename Index, int StorageOrder>
class const_blas_data_mapper : public blas_data_mapper<const Scalar, Index, StorageOrder> {
@@ -283,14 +411,15 @@ template<typename XprType> struct blas_traits
HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit)
&& ( bool(XprType::IsVectorAtCompileTime)
|| int(inner_stride_at_compile_time<XprType>::ret) == 1)
- ) ? 1 : 0
+ ) ? 1 : 0,
+ HasScalarFactor = false
};
typedef typename conditional<bool(HasUsableDirectAccess),
ExtractType,
typename _ExtractType::PlainObject
>::type DirectLinearAccessType;
- static inline ExtractType extract(const XprType& x) { return x; }
- static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
+ static inline EIGEN_DEVICE_FUNC ExtractType extract(const XprType& x) { return x; }
+ static inline EIGEN_DEVICE_FUNC const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
};
// pop conjugate
@@ -315,17 +444,23 @@ template<typename Scalar, typename NestedXpr, typename Plain>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> >
: blas_traits<NestedXpr>
{
+ enum {
+ HasScalarFactor = true
+ };
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
- static inline ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }
- static inline Scalar extractScalarFactor(const XprType& x)
+ static inline EIGEN_DEVICE_FUNC ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }
+ static inline EIGEN_DEVICE_FUNC Scalar extractScalarFactor(const XprType& x)
{ return x.lhs().functor().m_other * Base::extractScalarFactor(x.rhs()); }
};
template<typename Scalar, typename NestedXpr, typename Plain>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > >
: blas_traits<NestedXpr>
{
+ enum {
+ HasScalarFactor = true
+ };
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > XprType;
typedef typename Base::ExtractType ExtractType;
@@ -344,6 +479,9 @@ template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
{
+ enum {
+ HasScalarFactor = true
+ };
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
@@ -380,7 +518,7 @@ struct blas_traits<const T>
template<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>
struct extract_data_selector {
- static const typename T::Scalar* run(const T& m)
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static const typename T::Scalar* run(const T& m)
{
return blas_traits<T>::extract(m).data();
}
@@ -391,11 +529,53 @@ struct extract_data_selector<T,false> {
static typename T::Scalar* run(const T&) { return 0; }
};
-template<typename T> const typename T::Scalar* extract_data(const T& m)
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename T::Scalar* extract_data(const T& m)
{
return extract_data_selector<T>::run(m);
}
+/**
+ * \c combine_scalar_factors extracts and multiplies factors from GEMM and GEMV products.
+ * There is a specialization for booleans.
+ */
+template<typename ResScalar, typename Lhs, typename Rhs>
+struct combine_scalar_factors_impl
+{
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static ResScalar run(const Lhs& lhs, const Rhs& rhs)
+ {
+ return blas_traits<Lhs>::extractScalarFactor(lhs) * blas_traits<Rhs>::extractScalarFactor(rhs);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static ResScalar run(const ResScalar& alpha, const Lhs& lhs, const Rhs& rhs)
+ {
+ return alpha * blas_traits<Lhs>::extractScalarFactor(lhs) * blas_traits<Rhs>::extractScalarFactor(rhs);
+ }
+};
+template<typename Lhs, typename Rhs>
+struct combine_scalar_factors_impl<bool, Lhs, Rhs>
+{
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static bool run(const Lhs& lhs, const Rhs& rhs)
+ {
+ return blas_traits<Lhs>::extractScalarFactor(lhs) && blas_traits<Rhs>::extractScalarFactor(rhs);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static bool run(const bool& alpha, const Lhs& lhs, const Rhs& rhs)
+ {
+ return alpha && blas_traits<Lhs>::extractScalarFactor(lhs) && blas_traits<Rhs>::extractScalarFactor(rhs);
+ }
+};
+
+template<typename ResScalar, typename Lhs, typename Rhs>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ResScalar combine_scalar_factors(const ResScalar& alpha, const Lhs& lhs, const Rhs& rhs)
+{
+ return combine_scalar_factors_impl<ResScalar,Lhs,Rhs>::run(alpha, lhs, rhs);
+}
+template<typename ResScalar, typename Lhs, typename Rhs>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ResScalar combine_scalar_factors(const Lhs& lhs, const Rhs& rhs)
+{
+ return combine_scalar_factors_impl<ResScalar,Lhs,Rhs>::run(lhs, rhs);
+}
+
+
} // end namespace internal
} // end namespace Eigen
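
To summarize the new Incr != 1 specializations of BlasLinearMapper and blas_data_mapper: element (i,j) of a column-major panel now lives at data[i*incr + j*stride], with packet loads and stores falling back to pgather/pscatter. A stripped-down model of the addressing (StridedMapper is an illustrative name):

#include <cstdio>

struct StridedMapper {
  double* data;
  long stride;  // distance between consecutive columns
  long incr;    // distance between consecutive rows (the new inner increment)
  double& operator()(long i, long j) const {
    return data[i * incr + j * stride];
  }
};

int main() {
  double buf[24];
  for (int k = 0; k < 24; ++k) buf[k] = k;
  // View buf as a 3x4 matrix whose rows are 2 apart and columns 6 apart.
  StridedMapper m{buf, 6, 2};
  std::printf("%g %g\n", m(0, 0), m(2, 3)); // 0 and 2*2 + 3*6 = 22
}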
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/ConfigureVectorization.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/ConfigureVectorization.h
new file mode 100644
index 000000000..73e8a65a5
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/ConfigureVectorization.h
@@ -0,0 +1,512 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2018 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2020, Arm Limited and Contributors
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONFIGURE_VECTORIZATION_H
+#define EIGEN_CONFIGURE_VECTORIZATION_H
+
+//------------------------------------------------------------------------------------------
+// Static and dynamic alignment control
+//
+// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES
+// as the maximal boundary in bytes on which dynamically and statically allocated data may be aligned, respectively.
+// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,
+// a default value is automatically computed based on architecture, compiler, and OS.
+//
+// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}
+// to be used to declare statically aligned buffers.
+//------------------------------------------------------------------------------------------
+
+
+/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
+ * so that vectorization doesn't affect binary compatibility.
+ *
+ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
+ * vectorized and non-vectorized code.
+ *
+ * FIXME: this code can be cleaned up once we require C++11 or later.
+ */
+#if (defined EIGEN_CUDACC)
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_HAS_ALIGNAS
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) alignas(n)
+ #define EIGEN_ALIGNOF(x) alignof(x)
+#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_COMP_MSVC
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#elif EIGEN_COMP_SUNCC
+ // FIXME not sure about this one:
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+ #define EIGEN_ALIGNOF(x) __alignof(x)
+#else
+  #error Please tell me what the equivalent of alignas(n) and alignof(x) is for your compiler
+#endif
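A quick sketch of the C++11 branch of this dispatch (illustrating that branch only): applying EIGEN_ALIGN_TO_BOUNDARY(16) to a member is equivalent to writing

    // What EIGEN_ALIGN_TO_BOUNDARY(16) expands to when EIGEN_HAS_ALIGNAS is set:
    struct AlignedBuf { alignas(16) float data[4]; };
    static_assert(alignof(AlignedBuf) == 16, "16-byte boundary honored");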
+
+// If the user explicitly disables vectorization, then we also disable alignment
+#if defined(EIGEN_DONT_VECTORIZE)
+ #if defined(EIGEN_GPUCC)
+ // GPU code is always vectorized and requires memory alignment for
+ // statically allocated buffers.
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
+ #else
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
+ #endif
+#elif defined(__AVX512F__)
+ // 64 bytes static alignment is preferred only if really required
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
+#elif defined(__AVX__)
+ // 32 bytes static alignment is preferred only if really required
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32
+#else
+ #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
+#endif
+
+
+// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense
+#define EIGEN_MIN_ALIGN_BYTES 16
+
+// Defines the boundary (in bytes) on which the data needs to be aligned. Note
+// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be
+// aligned at all regardless of the value of this #define.
+
+#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0
+#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
+#endif
+
+// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprecated
+// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
+#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
+ #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
+ #undef EIGEN_MAX_STATIC_ALIGN_BYTES
+ #endif
+ #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
+#endif
+
+#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES
+
+  // Try to automatically guess the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES
+
+ // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
+ // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
+  // enable alignment, but it can cause problems on some platforms, so we just disable it on
+  // certain common platforms (compiler+architecture combinations) to avoid these problems.
+  // Only static alignment is really problematic (it relies on nonstandard compiler extensions),
+  // so we try to keep heap alignment even when we have to disable static alignment.
+ #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || EIGEN_ARCH_MIPS)
+ #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+ #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)
+ // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.
+ // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
+ // 4.8 and newer seem definitely unaffected.
+ #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+ #else
+ #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+ #endif
+
+ // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+ #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+ && !EIGEN_GCC3_OR_OLDER \
+ && !EIGEN_COMP_SUNCC \
+ && !EIGEN_OS_QNX
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+ #else
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+ #endif
+
+ #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
+ #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+ #else
+ #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
+ #endif
+
+#endif
+
+// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_STATIC_ALIGN_BYTES
+#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES
+#undef EIGEN_MAX_STATIC_ALIGN_BYTES
+#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
+#endif
+
+#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
+ #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+#endif
+
+// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
+// It takes into account both the user choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES)
+// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
+// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
+
+
+// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY
+#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
+#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
+#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)
+#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
+#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)
+#else
+#define EIGEN_ALIGN_MAX
+#endif
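Usage sketch for the shortcuts just defined: EIGEN_ALIGN_MAX expands to nothing when EIGEN_MAX_STATIC_ALIGN_BYTES is 0, so the same declaration stays valid whether or not static alignment is enabled.

    #include <Eigen/Core>

    // Aligned to EIGEN_MAX_STATIC_ALIGN_BYTES when static alignment is on,
    // and simply unaligned (but still well-formed) when it is 0.
    struct EIGEN_ALIGN_MAX Payload { float data[8]; };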
+
+
+// Dynamic alignment control
+
+#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0
+#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.
+#endif
+
+#ifdef EIGEN_DONT_ALIGN
+ #ifdef EIGEN_MAX_ALIGN_BYTES
+ #undef EIGEN_MAX_ALIGN_BYTES
+ #endif
+ #define EIGEN_MAX_ALIGN_BYTES 0
+#elif !defined(EIGEN_MAX_ALIGN_BYTES)
+ #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#endif
+
+#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES
+#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#else
+#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
+#endif
+
+
+#ifndef EIGEN_UNALIGNED_VECTORIZE
+#define EIGEN_UNALIGNED_VECTORIZE 1
+#endif
+
+//----------------------------------------------------------------------
+
+// If alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check; it takes into
+// account both the user's will (EIGEN_MAX_ALIGN_BYTES, EIGEN_DONT_ALIGN) and our own platform checks.
+#if EIGEN_MAX_ALIGN_BYTES==0
+ #ifndef EIGEN_DONT_VECTORIZE
+ #define EIGEN_DONT_VECTORIZE
+ #endif
+#endif
+
+
+// The following (except #include <malloc.h> and _M_IX86_FP ??) can likely be
+// removed as gcc 4.1 and msvc 2008 are not supported anyway.
+#if EIGEN_COMP_MSVC
+ #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
+ #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
+ // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
+ #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
+ #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
+ #endif
+ #endif
+#else
+ #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
+ #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
+ #endif
+#endif
+
+#if !(defined(EIGEN_DONT_VECTORIZE) || defined(EIGEN_GPUCC))
+
+ #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
+
+ // Defines symbols for compile-time detection of which instructions are
+ // used.
+ // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_SSE
+ #define EIGEN_VECTORIZE_SSE2
+
+ // Detect sse3/ssse3/sse4:
+ // gcc and icc defines __SSE3__, ...
+ // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
+ // want to force the use of those instructions with msvc.
+ #ifdef __SSE3__
+ #define EIGEN_VECTORIZE_SSE3
+ #endif
+ #ifdef __SSSE3__
+ #define EIGEN_VECTORIZE_SSSE3
+ #endif
+ #ifdef __SSE4_1__
+ #define EIGEN_VECTORIZE_SSE4_1
+ #endif
+ #ifdef __SSE4_2__
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+ #ifdef __AVX__
+ #ifndef EIGEN_USE_SYCL
+ #define EIGEN_VECTORIZE_AVX
+ #endif
+ #define EIGEN_VECTORIZE_SSE3
+ #define EIGEN_VECTORIZE_SSSE3
+ #define EIGEN_VECTORIZE_SSE4_1
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+ #ifdef __AVX2__
+ #ifndef EIGEN_USE_SYCL
+ #define EIGEN_VECTORIZE_AVX2
+ #define EIGEN_VECTORIZE_AVX
+ #endif
+ #define EIGEN_VECTORIZE_SSE3
+ #define EIGEN_VECTORIZE_SSSE3
+ #define EIGEN_VECTORIZE_SSE4_1
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+ #if defined(__FMA__) || (EIGEN_COMP_MSVC && defined(__AVX2__))
+    // MSVC does not expose a switch dedicated to FMA
+ // For MSVC, AVX2 => FMA
+ #define EIGEN_VECTORIZE_FMA
+ #endif
+ #if defined(__AVX512F__)
+ #ifndef EIGEN_VECTORIZE_FMA
+ #if EIGEN_COMP_GNUC
+ #error Please add -mfma to your compiler flags: compiling with -mavx512f alone without SSE/AVX FMA is not supported (bug 1638).
+ #else
+ #error Please enable FMA in your compiler flags (e.g. -mfma): compiling with AVX512 alone without SSE/AVX FMA is not supported (bug 1638).
+ #endif
+ #endif
+ #ifndef EIGEN_USE_SYCL
+ #define EIGEN_VECTORIZE_AVX512
+ #define EIGEN_VECTORIZE_AVX2
+ #define EIGEN_VECTORIZE_AVX
+ #endif
+ #define EIGEN_VECTORIZE_FMA
+ #define EIGEN_VECTORIZE_SSE3
+ #define EIGEN_VECTORIZE_SSSE3
+ #define EIGEN_VECTORIZE_SSE4_1
+ #define EIGEN_VECTORIZE_SSE4_2
+ #ifndef EIGEN_USE_SYCL
+ #ifdef __AVX512DQ__
+ #define EIGEN_VECTORIZE_AVX512DQ
+ #endif
+ #ifdef __AVX512ER__
+ #define EIGEN_VECTORIZE_AVX512ER
+ #endif
+ #ifdef __AVX512BF16__
+ #define EIGEN_VECTORIZE_AVX512BF16
+ #endif
+ #endif
+ #endif
+
+ // Disable AVX support on broken xcode versions
+ #if defined(__apple_build_version__) && (__apple_build_version__ == 11000033 ) && ( __MAC_OS_X_VERSION_MIN_REQUIRED == 101500 )
+    // A nasty bug in the clang compiler shipped with XCode 11.0, hit in a common compilation situation
+    // when the Mac deployment target is macOS 10.15: https://trac.macports.org/ticket/58776#no1
+ #ifdef EIGEN_VECTORIZE_AVX
+ #undef EIGEN_VECTORIZE_AVX
+ #warning "Disabling AVX support: clang compiler shipped with XCode 11.[012] generates broken assembly with -macosx-version-min=10.15 and AVX enabled. "
+ #ifdef EIGEN_VECTORIZE_AVX2
+ #undef EIGEN_VECTORIZE_AVX2
+ #endif
+ #ifdef EIGEN_VECTORIZE_FMA
+ #undef EIGEN_VECTORIZE_FMA
+ #endif
+ #ifdef EIGEN_VECTORIZE_AVX512
+ #undef EIGEN_VECTORIZE_AVX512
+ #endif
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ #undef EIGEN_VECTORIZE_AVX512DQ
+ #endif
+ #ifdef EIGEN_VECTORIZE_AVX512ER
+ #undef EIGEN_VECTORIZE_AVX512ER
+ #endif
+ #endif
+ // NOTE: Confirmed test failures in XCode 11.0, and XCode 11.2 with -macosx-version-min=10.15 and AVX
+    // NOTE using -macosx-version-min=10.15 with Xcode 11.0 results in runtime segmentation faults in many tests, and 11.2 produces core dumps in 3 tests
+ // NOTE using -macosx-version-min=10.14 produces functioning and passing tests in all cases
+ // NOTE __clang_version__ "11.0.0 (clang-1100.0.33.8)" XCode 11.0 <- Produces many segfault and core dumping tests
+ // with -macosx-version-min=10.15 and AVX
+ // NOTE __clang_version__ "11.0.0 (clang-1100.0.33.12)" XCode 11.2 <- Produces 3 core dumping tests with
+ // -macosx-version-min=10.15 and AVX
+ #endif
+
+ // include files
+
+ // This extern "C" works around a MINGW-w64 compilation issue
+ // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
+ // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
+ // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
+ // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
+ // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
+  // Notice that since these are C headers, the extern "C" is theoretically needed anyway.
+ extern "C" {
+ // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
+    // Doing so triggers some issues with ICC. However, old gcc versions seem to not have this file, thus:
+ #if EIGEN_COMP_ICC >= 1110
+ #include <immintrin.h>
+ #else
+ #include <mmintrin.h>
+ #include <emmintrin.h>
+ #include <xmmintrin.h>
+ #ifdef EIGEN_VECTORIZE_SSE3
+ #include <pmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSSE3
+ #include <tmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_1
+ #include <smmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_2
+ #include <nmmintrin.h>
+ #endif
+ #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512)
+ #include <immintrin.h>
+ #endif
+ #endif
+ } // end extern "C"
+
+ #elif defined __VSX__
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_VSX
+ #include <altivec.h>
+ // We need to #undef all these ugly tokens defined in <altivec.h>
+ // => use __vector instead of vector
+ #undef bool
+ #undef vector
+ #undef pixel
+
+ #elif defined __ALTIVEC__
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_ALTIVEC
+ #include <altivec.h>
+ // We need to #undef all these ugly tokens defined in <altivec.h>
+ // => use __vector instead of vector
+ #undef bool
+ #undef vector
+ #undef pixel
+
+ #elif ((defined __ARM_NEON) || (defined __ARM_NEON__)) && !(defined EIGEN_ARM64_USE_SVE)
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_NEON
+ #include <arm_neon.h>
+
+ // We currently require SVE to be enabled explicitly via EIGEN_ARM64_USE_SVE and
+ // will not select the backend automatically
+ #elif (defined __ARM_FEATURE_SVE) && (defined EIGEN_ARM64_USE_SVE)
+
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_SVE
+ #include <arm_sve.h>
+
+ // Since we depend on knowing SVE vector lengths at compile-time, we need
+  // to ensure a fixed length is set.
+ #if defined __ARM_FEATURE_SVE_BITS
+ #define EIGEN_ARM64_SVE_VL __ARM_FEATURE_SVE_BITS
+ #else
+#error "Eigen requires a fixed SVE lector length but EIGEN_ARM64_SVE_VL is not set."
+#endif
+
+#elif (defined __s390x__ && defined __VEC__)
+
+#define EIGEN_VECTORIZE
+#define EIGEN_VECTORIZE_ZVECTOR
+#include <vecintrin.h>
+
+#elif defined __mips_msa
+
+// Limit MSA optimizations to little-endian CPUs for now.
+// TODO: Perhaps eventually support MSA optimizations on big-endian CPUs?
+#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#if defined(__LP64__)
+#define EIGEN_MIPS_64
+#else
+#define EIGEN_MIPS_32
+#endif
+#define EIGEN_VECTORIZE
+#define EIGEN_VECTORIZE_MSA
+#include <msa.h>
+#endif
+
+#endif
+#endif
+
+// Following the Arm ACLE, arm_neon.h should also include arm_fp16.h, but not all
+// compilers seem to follow this. We therefore include it explicitly.
+// See also: https://bugs.llvm.org/show_bug.cgi?id=47955
+#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ #include <arm_fp16.h>
+#endif
+
+#if defined(__F16C__) && (!defined(EIGEN_GPUCC) && (!EIGEN_COMP_CLANG || EIGEN_COMP_CLANG>=380))
+ // We can use the optimized fp16 to float and float to fp16 conversion routines
+ #define EIGEN_HAS_FP16_C
+
+ #if EIGEN_COMP_CLANG
+ // Workaround for clang: The FP16C intrinsics for clang are included by
+ // immintrin.h, as opposed to emmintrin.h as suggested by Intel:
+ // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#othertechs=FP16C&expand=1711
+ #include <immintrin.h>
+ #endif
+#endif
+
+#if defined EIGEN_CUDACC
+ #define EIGEN_VECTORIZE_GPU
+ #include <vector_types.h>
+ #if EIGEN_CUDA_SDK_VER >= 70500
+ #define EIGEN_HAS_CUDA_FP16
+ #endif
+#endif
+
+#if defined(EIGEN_HAS_CUDA_FP16)
+ #include <cuda_runtime_api.h>
+ #include <cuda_fp16.h>
+#endif
+
+#if defined(EIGEN_HIPCC)
+ #define EIGEN_VECTORIZE_GPU
+ #include <hip/hip_vector_types.h>
+ #define EIGEN_HAS_HIP_FP16
+ #include <hip/hip_fp16.h>
+#endif
+
+
+/** \brief Namespace containing all symbols from the %Eigen library. */
+namespace Eigen {
+
+inline static const char *SimdInstructionSetsInUse(void) {
+#if defined(EIGEN_VECTORIZE_AVX512)
+ return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_AVX)
+ return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_SSE4_2)
+ return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_SSE4_1)
+ return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
+#elif defined(EIGEN_VECTORIZE_SSSE3)
+ return "SSE, SSE2, SSE3, SSSE3";
+#elif defined(EIGEN_VECTORIZE_SSE3)
+ return "SSE, SSE2, SSE3";
+#elif defined(EIGEN_VECTORIZE_SSE2)
+ return "SSE, SSE2";
+#elif defined(EIGEN_VECTORIZE_ALTIVEC)
+ return "AltiVec";
+#elif defined(EIGEN_VECTORIZE_VSX)
+ return "VSX";
+#elif defined(EIGEN_VECTORIZE_NEON)
+ return "ARM NEON";
+#elif defined(EIGEN_VECTORIZE_SVE)
+ return "ARM SVE";
+#elif defined(EIGEN_VECTORIZE_ZVECTOR)
+ return "S390X ZVECTOR";
+#elif defined(EIGEN_VECTORIZE_MSA)
+ return "MIPS MSA";
+#else
+ return "None";
+#endif
+}
+
+} // end namespace Eigen
+
+
+#endif // EIGEN_CONFIGURE_VECTORIZATION_H
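SimdInstructionSetsInUse() is the public entry point to the configuration computed above; a minimal program to report what the preceding logic selected:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      std::cout << "SIMD in use: " << Eigen::SimdInstructionSetsInUse() << "\n";
      std::cout << "EIGEN_MAX_ALIGN_BYTES: " << EIGEN_MAX_ALIGN_BYTES << "\n";
    }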
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/Constants.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/Constants.h
index 5d37e5d04..35dcaa7b3 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/Constants.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/Constants.h
@@ -3,6 +3,7 @@
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2020, Arm Limited and Contributors
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -156,7 +157,7 @@ const unsigned int DirectAccessBit = 0x40;
/** \deprecated \ingroup flags
*
* means the first coefficient packet is guaranteed to be aligned.
- * An expression cannot has the AlignedBit without the PacketAccessBit flag.
+ * An expression cannot have the AlignedBit without the PacketAccessBit flag.
* In other words, this means we are allowed to perform an aligned packet access to the first element regardless
* of the expression kind:
* \code
@@ -255,12 +256,6 @@ enum AlignmentType {
};
/** \ingroup enums
- * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
-// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox
-// TODO: find out what to do with that. Adapt the AlignedBox API ?
-enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
-
-/** \ingroup enums
* Enum containing possible values for the \p Direction parameter of
* Reverse, PartialReduxExpr and VectorwiseOp. */
enum DirectionType {
@@ -334,9 +329,20 @@ enum StorageOptions {
* Enum for specifying whether to apply or solve on the left or right. */
enum SideType {
/** Apply transformation on the left. */
- OnTheLeft = 1,
+ OnTheLeft = 1,
/** Apply transformation on the right. */
- OnTheRight = 2
+ OnTheRight = 2
+};
+
+/** \ingroup enums
+ * Enum for specifying NaN-propagation behavior, e.g. for coeff-wise min/max. */
+enum NaNPropagationOptions {
+  /** Implementation-defined behavior if NaNs are present. */
+ PropagateFast = 0,
+ /** Always propagate NaNs. */
+ PropagateNaN,
+ /** Always propagate not-NaNs. */
+ PropagateNumbers
};
/* the following used to be written as:
@@ -468,6 +474,8 @@ namespace Architecture
AltiVec = 0x2,
VSX = 0x3,
NEON = 0x4,
+ MSA = 0x5,
+ SVE = 0x6,
#if defined EIGEN_VECTORIZE_SSE
Target = SSE
#elif defined EIGEN_VECTORIZE_ALTIVEC
@@ -476,6 +484,10 @@ namespace Architecture
Target = VSX
#elif defined EIGEN_VECTORIZE_NEON
Target = NEON
+#elif defined EIGEN_VECTORIZE_SVE
+ Target = SVE
+#elif defined EIGEN_VECTORIZE_MSA
+ Target = MSA
#else
Target = Generic
#endif
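To make the new NaNPropagationOptions enum concrete, a sketch assuming Eigen 3.4's NaN-propagation template argument on the coefficient-wise max (that overload itself is not part of this hunk):

    #include <Eigen/Dense>
    #include <iostream>
    #include <limits>

    int main() {
      Eigen::ArrayXf a(3), b(3);
      a << 1.f, std::numeric_limits<float>::quiet_NaN(), 3.f;
      b << 2.f, 0.f, 1.f;
      // PropagateNumbers skips NaNs; PropagateNaN lets them win.
      std::cout << a.max<Eigen::PropagateNumbers>(b).transpose() << "\n"; // 2 0 3
      std::cout << a.max<Eigen::PropagateNaN>(b).transpose() << "\n";     // 2 nan 3
    }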
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/DisableStupidWarnings.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/DisableStupidWarnings.h
index 8ef0f3594..e950749e7 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/DisableStupidWarnings.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/DisableStupidWarnings.h
@@ -4,6 +4,7 @@
#ifdef _MSC_VER
// 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))
// 4101 - unreferenced local variable
+ // 4127 - conditional expression is constant
// 4181 - qualifier applied to reference type ignored
// 4211 - nonstandard extension used : redefined extern to static
// 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data
@@ -19,7 +20,7 @@
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma warning( push )
#endif
- #pragma warning( disable : 4100 4101 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800)
+ #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800)
#elif defined __INTEL_COMPILER
// 2196 - routine is both "inline" and "noinline" ("noinline" assumed)
@@ -44,14 +45,33 @@
#if __clang_major__ >= 3 && __clang_minor__ >= 5
#pragma clang diagnostic ignored "-Wabsolute-value"
#endif
+ #if __clang_major__ >= 10
+ #pragma clang diagnostic ignored "-Wimplicit-int-float-conversion"
+ #endif
+ #if ( defined(__ALTIVEC__) || defined(__VSX__) ) && __cplusplus < 201103L
+ // warning: generic selections are a C11-specific feature
+ // ignoring warnings thrown at vec_ctf in Altivec/PacketMath.h
+ #pragma clang diagnostic ignored "-Wc11-extensions"
+ #endif
-#elif defined __GNUC__ && __GNUC__>=6
+#elif defined __GNUC__
- #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #if (!defined(EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS)) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#pragma GCC diagnostic push
#endif
- #pragma GCC diagnostic ignored "-Wignored-attributes"
-
+ // g++ warns about local variables shadowing member functions, which is too strict
+ #pragma GCC diagnostic ignored "-Wshadow"
+ #if __GNUC__ == 4 && __GNUC_MINOR__ < 8
+ // Until g++-4.7 there are warnings when comparing unsigned int vs 0, even in templated functions:
+ #pragma GCC diagnostic ignored "-Wtype-limits"
+ #endif
+ #if __GNUC__>=6
+ #pragma GCC diagnostic ignored "-Wignored-attributes"
+ #endif
+ #if __GNUC__==7
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89325
+ #pragma GCC diagnostic ignored "-Wattributes"
+ #endif
#endif
#if defined __NVCC__
@@ -74,6 +94,21 @@
#pragma diag_suppress 2735
#pragma diag_suppress 2737
#pragma diag_suppress 2739
+ #pragma diag_suppress 2976
+ #pragma diag_suppress 2979
+ // Disable the "// __device__ annotation is ignored on a function(...) that is
+ // explicitly defaulted on its first declaration" message.
+ // The __device__ annotation seems to actually be needed in some cases,
+ // otherwise resulting in kernel runtime errors.
+ #pragma diag_suppress 2977
#endif
+#else
+// warnings already disabled:
+# ifndef EIGEN_WARNINGS_DISABLED_2
+# define EIGEN_WARNINGS_DISABLED_2
+# elif defined(EIGEN_INTERNAL_DEBUGGING)
+# error "Do not include \"DisableStupidWarnings.h\" recursively more than twice!"
+# endif
+
#endif // not EIGEN_WARNINGS_DISABLED
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/ForwardDeclarations.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/ForwardDeclarations.h
index 1a48cff04..2f9cc4491 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/ForwardDeclarations.h
@@ -47,11 +47,7 @@ template<typename T> struct NumTraits;
template<typename Derived> struct EigenBase;
template<typename Derived> class DenseBase;
template<typename Derived> class PlainObjectBase;
-
-
-template<typename Derived,
- int Level = internal::accessors_level<Derived>::value >
-class DenseCoeffsBase;
+template<typename Derived, int Level> class DenseCoeffsBase;
template<typename _Scalar, int _Rows, int _Cols,
int _Options = AutoAlign |
@@ -84,6 +80,7 @@ template<typename ExpressionType> class SwapWrapper;
template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;
template<typename XprType, typename RowIndices, typename ColIndices> class IndexedView;
+template<typename XprType, int Rows=Dynamic, int Cols=Dynamic, int Order=0> class Reshaped;
template<typename MatrixType, int Size=Dynamic> class VectorBlock;
template<typename MatrixType> class Transpose;
@@ -113,7 +110,7 @@ template<typename _IndicesType> class TranspositionsWrapper;
template<typename Derived,
int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
> class MapBase;
-template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
+template<int OuterStrideAtCompileTime, int InnerStrideAtCompileTime> class Stride;
template<int Value = Dynamic> class InnerStride;
template<int Value = Dynamic> class OuterStride;
template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
@@ -134,6 +131,10 @@ template<typename Derived> class SolverBase;
template<typename XprType> class InnerIterator;
namespace internal {
+template<typename XprType> class generic_randaccess_stl_iterator;
+template<typename XprType> class pointer_based_stl_iterator;
+template<typename XprType, DirectionType Direction> class subvector_stl_iterator;
+template<typename XprType, DirectionType Direction> class subvector_stl_reverse_iterator;
template<typename DecompositionType> struct kernel_retval_base;
template<typename DecompositionType> struct kernel_retval;
template<typename DecompositionType> struct image_retval_base;
@@ -179,14 +180,15 @@ template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRh
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_sum_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_difference_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_conj_product_op;
-template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_min_op;
-template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_max_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar, int NaNPropagation=PropagateFast> struct scalar_min_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar, int NaNPropagation=PropagateFast> struct scalar_max_op;
template<typename Scalar> struct scalar_opposite_op;
template<typename Scalar> struct scalar_conjugate_op;
template<typename Scalar> struct scalar_real_op;
template<typename Scalar> struct scalar_imag_op;
template<typename Scalar> struct scalar_abs_op;
template<typename Scalar> struct scalar_abs2_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_absolute_difference_op;
template<typename Scalar> struct scalar_sqrt_op;
template<typename Scalar> struct scalar_rsqrt_op;
template<typename Scalar> struct scalar_exp_op;
@@ -203,7 +205,7 @@ template<typename Scalar, typename NewType> struct scalar_cast_op;
template<typename Scalar> struct scalar_random_op;
template<typename Scalar> struct scalar_constant_op;
template<typename Scalar> struct scalar_identity_op;
-template<typename Scalar,bool iscpx> struct scalar_sign_op;
+template<typename Scalar,bool is_complex, bool is_integer> struct scalar_sign_op;
template<typename Scalar,typename ScalarExponent> struct scalar_pow_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_hypot_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
@@ -214,11 +216,27 @@ template<typename Scalar> struct scalar_lgamma_op;
template<typename Scalar> struct scalar_digamma_op;
template<typename Scalar> struct scalar_erf_op;
template<typename Scalar> struct scalar_erfc_op;
+template<typename Scalar> struct scalar_ndtri_op;
template<typename Scalar> struct scalar_igamma_op;
template<typename Scalar> struct scalar_igammac_op;
template<typename Scalar> struct scalar_zeta_op;
template<typename Scalar> struct scalar_betainc_op;
+// Bessel functions in SpecialFunctions module
+template<typename Scalar> struct scalar_bessel_i0_op;
+template<typename Scalar> struct scalar_bessel_i0e_op;
+template<typename Scalar> struct scalar_bessel_i1_op;
+template<typename Scalar> struct scalar_bessel_i1e_op;
+template<typename Scalar> struct scalar_bessel_j0_op;
+template<typename Scalar> struct scalar_bessel_y0_op;
+template<typename Scalar> struct scalar_bessel_j1_op;
+template<typename Scalar> struct scalar_bessel_y1_op;
+template<typename Scalar> struct scalar_bessel_k0_op;
+template<typename Scalar> struct scalar_bessel_k0e_op;
+template<typename Scalar> struct scalar_bessel_k1_op;
+template<typename Scalar> struct scalar_bessel_k1e_op;
+
+
} // end namespace internal
struct IOFormat;
@@ -256,6 +274,7 @@ template<typename MatrixType> class HouseholderQR;
template<typename MatrixType> class ColPivHouseholderQR;
template<typename MatrixType> class FullPivHouseholderQR;
template<typename MatrixType> class CompleteOrthogonalDecomposition;
+template<typename MatrixType> class SVDBase;
template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
template<typename MatrixType> class BDCSVD;
template<typename MatrixType, int UpLo = Lower> class LLT;
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/IndexedViewHelper.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/IndexedViewHelper.h
index ab01c857f..f85de305f 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/IndexedViewHelper.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/IndexedViewHelper.h
@@ -13,13 +13,6 @@
namespace Eigen {
-/** \namespace Eigen::placeholders
- * \ingroup Core_Module
- *
- * Namespace containing symbolic placeholder and identifiers
- */
-namespace placeholders {
-
namespace internal {
struct symbolic_last_tag {};
}
@@ -30,41 +23,40 @@ struct symbolic_last_tag {};
* Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last element/row/columns
* of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
*
- * This symbolic placeholder support standard arithmetic operation.
+ * This symbolic placeholder supports standard arithmetic operations.
*
* A typical usage example would be:
* \code
* using namespace Eigen;
- * using Eigen::placeholders::last;
+ * using Eigen::last;
* VectorXd v(n);
* v(seq(2,last-2)).setOnes();
* \endcode
*
* \sa end
*/
-static const Symbolic::SymbolExpr<internal::symbolic_last_tag> last;
+static const symbolic::SymbolExpr<internal::symbolic_last_tag> last; // PLEASE use Eigen::last instead of Eigen::placeholders::last
-/** \var end
+/** \var lastp1
* \ingroup Core_Module
*
- * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last+1 element/row/columns
- * of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
+ * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically
+ * reference the last+1 element/row/columns of the underlying vector or matrix once
+ * passed to DenseBase::operator()(const RowIndices&, const ColIndices&).
*
- * This symbolic placeholder support standard arithmetic operation.
- * It is essentially an alias to last+1
+ * This symbolic placeholder supports standard arithmetic operations.
+ * It is essentially an alias to last+fix<1>.
*
* \sa last
*/
#ifdef EIGEN_PARSED_BY_DOXYGEN
-static const auto end = last+1;
+static const auto lastp1 = last+fix<1>;
#else
// Using a FixedExpr<1> expression is important here to make sure the compiler
// can fully optimize the computation starting indices with zero overhead.
-static const Symbolic::AddExpr<Symbolic::SymbolExpr<internal::symbolic_last_tag>,Symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > end(last+fix<1>());
+static const symbolic::AddExpr<symbolic::SymbolExpr<internal::symbolic_last_tag>,symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > lastp1(last+fix<1>());
#endif
-} // end namespace placeholders
-
namespace internal {
// Replace symbolic last/end "keywords" by their true runtime value
@@ -74,9 +66,9 @@ template<int N>
FixedInt<N> eval_expr_given_size(FixedInt<N> x, Index /*size*/) { return x; }
template<typename Derived>
-Index eval_expr_given_size(const Symbolic::BaseExpr<Derived> &x, Index size)
+Index eval_expr_given_size(const symbolic::BaseExpr<Derived> &x, Index size)
{
- return x.derived().eval(placeholders::last=size-1);
+ return x.derived().eval(last=size-1);
}
// Extract increment/step at compile time
@@ -86,7 +78,7 @@ template<typename T, typename EnableIf = void> struct get_compile_time_incr {
// Analogue of std::get<0>(x), but tailored for our needs.
template<typename T>
-Index first(const T& x) { return x.first(); }
+EIGEN_CONSTEXPR Index first(const T& x) EIGEN_NOEXCEPT { return x.first(); }
// IndexedViewCompatibleType/makeIndexedViewCompatible turn an arbitrary object of type T into something usable by MatrixSlice
// The generic implementation is a no-op
@@ -108,8 +100,8 @@ struct SingleRange {
};
SingleRange(Index val) : m_value(val) {}
Index operator[](Index) const { return m_value; }
- Index size() const { return 1; }
- Index first() const { return m_value; }
+ static EIGEN_CONSTEXPR Index size() EIGEN_NOEXCEPT { return 1; }
+ Index first() const EIGEN_NOEXCEPT { return m_value; }
Index m_value;
};
@@ -117,7 +109,7 @@ template<> struct get_compile_time_incr<SingleRange> {
enum { value = 1 }; // 1 or 0 ??
};
-// Turn a single index into something that looks like an array (i.e., that exposes a .size(), and operatro[](int) methods)
+// Turn a single index into something that looks like an array (i.e., that exposes a .size(), and operator[](int) methods)
template<typename T, int XprSize>
struct IndexedViewCompatibleType<T,XprSize,typename internal::enable_if<internal::is_integral<T>::value>::type> {
// Here we could simply use Array, but maybe it's less work for the compiler to use
@@ -127,13 +119,13 @@ struct IndexedViewCompatibleType<T,XprSize,typename internal::enable_if<internal
};
template<typename T, int XprSize>
-struct IndexedViewCompatibleType<T, XprSize, typename enable_if<Symbolic::is_symbolic<T>::value>::type> {
+struct IndexedViewCompatibleType<T, XprSize, typename enable_if<symbolic::is_symbolic<T>::value>::type> {
typedef SingleRange type;
};
template<typename T>
-typename enable_if<Symbolic::is_symbolic<T>::value,SingleRange>::type
+typename enable_if<symbolic::is_symbolic<T>::value,SingleRange>::type
makeIndexedViewCompatible(const T& id, Index size, SpecializedType) {
return eval_expr_given_size(id,size);
}
@@ -149,9 +141,9 @@ template<int XprSize>
struct AllRange {
enum { SizeAtCompileTime = XprSize };
AllRange(Index size = XprSize) : m_size(size) {}
- Index operator[](Index i) const { return i; }
- Index size() const { return m_size.value(); }
- Index first() const { return 0; }
+ EIGEN_CONSTEXPR Index operator[](Index i) const EIGEN_NOEXCEPT { return i; }
+ EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_size.value(); }
+ EIGEN_CONSTEXPR Index first() const EIGEN_NOEXCEPT { return 0; }
variable_if_dynamic<Index,XprSize> m_size;
};
@@ -172,14 +164,21 @@ template<int Size> struct get_compile_time_incr<AllRange<Size> > {
} // end namespace internal
-namespace placeholders {
-
/** \var all
* \ingroup Core_Module
* Can be used as a parameter to DenseBase::operator()(const RowIndices&, const ColIndices&) to index all rows or columns
*/
-static const Eigen::internal::all_t all;
+static const Eigen::internal::all_t all; // PLEASE use Eigen::all instead of Eigen::placeholders::all
+
+
+namespace placeholders {
+ typedef symbolic::SymbolExpr<internal::symbolic_last_tag> last_t;
+ typedef symbolic::AddExpr<symbolic::SymbolExpr<internal::symbolic_last_tag>,symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > end_t;
+ typedef Eigen::internal::all_t all_t;
+ EIGEN_DEPRECATED static const all_t all = Eigen::all; // PLEASE use Eigen::all instead of Eigen::placeholders::all
+ EIGEN_DEPRECATED static const last_t last = Eigen::last; // PLEASE use Eigen::last instead of Eigen::placeholders::last
+ EIGEN_DEPRECATED static const end_t end = Eigen::lastp1; // PLEASE use Eigen::lastp1 instead of Eigen::placeholders::end
}
} // end namespace Eigen
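A short sketch of the renamed placeholders in their Eigen 3.4 spelling (Eigen::last, Eigen::lastp1, Eigen::all instead of the deprecated placeholders namespace):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(8, 0, 7);
      v(Eigen::seq(2, Eigen::last - 2)).setZero();  // zero elements 2..5
      Eigen::MatrixXd A = Eigen::MatrixXd::Identity(4, 4);
      // lastp1 == last + fix<1>, convenient as an exclusive upper bound:
      std::cout << A(Eigen::all, Eigen::seq(0, Eigen::lastp1 - 2)) << "\n"; // first 3 columns
      std::cout << v.transpose() << "\n"; // 0 1 0 0 0 0 6 7
    }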
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/IntegralConstant.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/IntegralConstant.h
index 78a4705cd..e0092f654 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/IntegralConstant.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/IntegralConstant.h
@@ -52,10 +52,12 @@ template<int N> class FixedInt
{
public:
static const int value = N;
- operator int() const { return value; }
+ EIGEN_CONSTEXPR operator int() const { return value; }
FixedInt() {}
FixedInt( VariableAndFixedInt<N> other) {
- EIGEN_ONLY_USED_FOR_DEBUG(other);
+ #ifndef EIGEN_INTERNAL_DEBUGGING
+ EIGEN_UNUSED_VARIABLE(other);
+ #endif
eigen_internal_assert(int(other)==N);
}
@@ -75,7 +77,7 @@ public:
template<int M>
FixedInt<N&M> operator&( FixedInt<M>) const { return FixedInt<N&M>(); }
-#if EIGEN_HAS_CXX14
+#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
// Needed in C++14 to allow fix<N>():
FixedInt operator() () const { return *this; }
@@ -136,7 +138,7 @@ template<int N,int Default> struct get_fixed_value<FixedInt<N>,Default> {
static const int value = N;
};
-#if !EIGEN_HAS_CXX14
+#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
template<int N,int Default> struct get_fixed_value<FixedInt<N> (*)(),Default> {
static const int value = N;
};
@@ -152,7 +154,7 @@ struct get_fixed_value<variable_if_dynamic<T,N>,Default> {
};
template<typename T> EIGEN_DEVICE_FUNC Index get_runtime_value(const T &x) { return x; }
-#if !EIGEN_HAS_CXX14
+#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
template<int N> EIGEN_DEVICE_FUNC Index get_runtime_value(FixedInt<N> (*)()) { return N; }
#endif
@@ -164,7 +166,7 @@ template<typename T, int DynamicKey=Dynamic, typename EnableIf=void> struct clea
// Convert any integral type (e.g., short, int, unsigned int, etc.) to Eigen::Index
template<typename T, int DynamicKey> struct cleanup_index_type<T,DynamicKey,typename internal::enable_if<internal::is_integral<T>::value>::type> { typedef Index type; };
-#if !EIGEN_HAS_CXX14
+#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
// In c++98/c++11, fix<N> is a pointer to function that we better cleanup to a true FixedInt<N>:
template<int N, int DynamicKey> struct cleanup_index_type<FixedInt<N> (*)(), DynamicKey> { typedef FixedInt<N> type; };
#endif
@@ -182,7 +184,7 @@ template<int N, int DynamicKey> struct cleanup_index_type<std::integral_constant
#ifndef EIGEN_PARSED_BY_DOXYGEN
-#if EIGEN_HAS_CXX14
+#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
template<int N>
static const internal::FixedInt<N> fix{};
#else
@@ -192,7 +194,7 @@ inline internal::FixedInt<N> fix() { return internal::FixedInt<N>(); }
// The generic typename T is mandatory. Otherwise, a code like fix<N> could refer to either the function above or this next overload.
// This way a code like fix<N> can only refer to the previous function.
template<int N,typename T>
-inline internal::VariableAndFixedInt<N> fix(T val) { return internal::VariableAndFixedInt<N>(val); }
+inline internal::VariableAndFixedInt<N> fix(T val) { return internal::VariableAndFixedInt<N>(internal::convert_index<int>(val)); }
#endif
#else // EIGEN_PARSED_BY_DOXYGEN
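Usage sketch for fix<N> (Eigen 3.4 API): the integer travels in the type, so slices like the ones below keep compile-time sizes.

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::VectorXi v = Eigen::VectorXi::LinSpaced(10, 0, 9);
      // seqN(start, size, increment), all three known at compile time:
      std::cout << v(Eigen::seqN(Eigen::fix<0>, Eigen::fix<5>, Eigen::fix<2>)).transpose()
                << "\n"; // 0 2 4 6 8
      // Mixing a compile-time start with a runtime size also works:
      std::cout << v(Eigen::seqN(Eigen::fix<1>, 4)).transpose() << "\n"; // 1 2 3 4
    }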
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/MKL_support.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/MKL_support.h
index b7d6ecc76..17963fad4 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/MKL_support.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/MKL_support.h
@@ -55,7 +55,11 @@
#if defined EIGEN_USE_MKL
-# include <mkl.h>
+# if (!defined MKL_DIRECT_CALL) && (!defined EIGEN_MKL_NO_DIRECT_CALL)
+# define MKL_DIRECT_CALL
+# define MKL_DIRECT_CALL_JUST_SET
+# endif
+# include <mkl.h>
/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
# ifndef INTEL_MKL_VERSION
# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
@@ -69,6 +73,9 @@
# undef EIGEN_USE_MKL_VML
# undef EIGEN_USE_LAPACKE_STRICT
# undef EIGEN_USE_LAPACKE
+# ifdef MKL_DIRECT_CALL_JUST_SET
+# undef MKL_DIRECT_CALL
+# endif
# endif
#endif
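From the user side, EIGEN_MKL_NO_DIRECT_CALL is the opt-out this hunk introduces; a sketch of a client translation unit (assuming an MKL installation):

    // Keep Eigen from defining MKL_DIRECT_CALL on our behalf before <mkl.h> is pulled in.
    #define EIGEN_USE_MKL_ALL
    #define EIGEN_MKL_NO_DIRECT_CALL
    #include <Eigen/Dense>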
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/Macros.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/Macros.h
index 46b2659bd..b436dfad3 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/Macros.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/Macros.h
@@ -11,15 +11,52 @@
#ifndef EIGEN_MACROS_H
#define EIGEN_MACROS_H
+//------------------------------------------------------------------------------------------
+// Eigen version and basic defaults
+//------------------------------------------------------------------------------------------
+
#define EIGEN_WORLD_VERSION 3
-#define EIGEN_MAJOR_VERSION 3
-#define EIGEN_MINOR_VERSION 90
+#define EIGEN_MAJOR_VERSION 4
+#define EIGEN_MINOR_VERSION 0
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
EIGEN_MINOR_VERSION>=z))))
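Client-side sketch of the version-check macro; with the version bump above, the first branch is now taken:

    #include <Eigen/Core>

    #if EIGEN_VERSION_AT_LEAST(3,4,0)
      // Safe to rely on 3.4 features: Eigen::seq, Eigen::last, Reshaped, ...
    #else
      #error "This code requires Eigen 3.4 or newer."
    #endif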
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor
+#else
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor
+#endif
+
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+// Upperbound on the C++ version to use.
+// Expected values are 03, 11, 14, 17, etc.
+// By default, let's use an arbitrarily large C++ version.
+#ifndef EIGEN_MAX_CPP_VER
+#define EIGEN_MAX_CPP_VER 99
+#endif
+
+/** Allows disabling some optimizations which might affect the accuracy of the result.
+  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+ * They currently include:
+ * - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.
+ */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+// 131072 == 128 KB
+#define EIGEN_STACK_ALLOCATION_LIMIT 131072
+#endif
+
+//------------------------------------------------------------------------------------------
// Compiler identification, EIGEN_COMP_*
+//------------------------------------------------------------------------------------------
/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
#ifdef __GNUC__
@@ -35,6 +72,13 @@
#define EIGEN_COMP_CLANG 0
#endif
+/// \internal EIGEN_COMP_CASTXML set to 1 if being preprocessed by CastXML
+#if defined(__castxml__)
+ #define EIGEN_COMP_CASTXML 1
+#else
+ #define EIGEN_COMP_CASTXML 0
+#endif
+
/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm
#if defined(__llvm__)
#define EIGEN_COMP_LLVM 1
@@ -70,14 +114,44 @@
#define EIGEN_COMP_MSVC 0
#endif
+#if defined(__NVCC__)
+#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
+ #define EIGEN_COMP_NVCC ((__CUDACC_VER_MAJOR__ * 10000) + (__CUDACC_VER_MINOR__ * 100))
+#elif defined(__CUDACC_VER__)
+ #define EIGEN_COMP_NVCC __CUDACC_VER__
+#else
+ #error "NVCC did not define compiler version."
+#endif
+#else
+ #define EIGEN_COMP_NVCC 0
+#endif
+
// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC:
-// name ver MSC_VER
-// 2008 9 1500
-// 2010 10 1600
-// 2012 11 1700
-// 2013 12 1800
-// 2015 14 1900
-// "15" 15 1900
+// name ver MSC_VER
+// 2008 9 1500
+// 2010 10 1600
+// 2012 11 1700
+// 2013 12 1800
+// 2015 14 1900
+// "15" 15 1900
+// 2017-14.1 15.0 1910
+// 2017-14.11 15.3 1911
+// 2017-14.12 15.5 1912
+// 2017-14.13 15.6 1913
+// 2017-14.14 15.7 1914
+
+/// \internal EIGEN_COMP_MSVC_LANG set to _MSVC_LANG if the compiler is Microsoft Visual C++, 0 otherwise.
+#if defined(_MSVC_LANG)
+ #define EIGEN_COMP_MSVC_LANG _MSVC_LANG
+#else
+ #define EIGEN_COMP_MSVC_LANG 0
+#endif
+
+// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC_LANG:
+// MSVC option Standard MSVC_LANG
+// /std:c++14 (default as of VS 2019) C++14 201402L
+// /std:c++17 C++17 201703L
+// /std:c++latest >C++17 >201703L
/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not, e.g., ICC or clang-cl
#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC || EIGEN_COMP_LLVM || EIGEN_COMP_CLANG)
@@ -86,16 +160,21 @@
#define EIGEN_COMP_MSVC_STRICT 0
#endif
-/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
-#if defined(__IBMCPP__) || defined(__xlc__)
- #define EIGEN_COMP_IBM 1
+/// \internal EIGEN_COMP_IBM set to xlc version if the compiler is IBM XL C++
+// XLC version
+// 3.1 0x0301
+// 4.5 0x0405
+// 5.0 0x0500
+// 12.1 0x0C01
+#if defined(__IBMCPP__) || defined(__xlc__) || defined(__ibmxl__)
+ #define EIGEN_COMP_IBM __xlC__
#else
#define EIGEN_COMP_IBM 0
#endif
-/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
+/// \internal EIGEN_COMP_PGI set to PGI version if the compiler is Portland Group Compiler
#if defined(__PGI)
- #define EIGEN_COMP_PGI 1
+ #define EIGEN_COMP_PGI (__PGIC__*100+__PGIC_MINOR__)
#else
#define EIGEN_COMP_PGI 0
#endif
@@ -114,22 +193,9 @@
#define EIGEN_COMP_EMSCRIPTEN 0
#endif
-/// \internal EIGEN_COMP_LCC set to 1 if the compiler is LCC (Local (or Little) C Compiler)
-#if defined(__LCC__) && !(defined(__e2k__))
- #define EIGEN_COMP_LCC 1
-#else
- #define EIGEN_COMP_LCC 0
-#endif
-
-/// \internal EIGEN_COMP_LCC_E2K set to 1 if the compiler is LCC (MCST eLbrus C Compiler)
-#if defined(__LCC__) && defined(__e2k__)
- #define EIGEN_COMP_LCC_E2K 1
-#else
- #define EIGEN_COMP_LCC_E2K 0
-#endif
/// \internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)
-#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM || EIGEN_COMP_EMSCRIPTEN || EIGEN_COMP_LCC || EIGEN_COMP_LCC_E2K)
+#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM || EIGEN_COMP_EMSCRIPTEN)
#define EIGEN_COMP_GNUC_STRICT 1
#else
#define EIGEN_COMP_GNUC_STRICT 0
@@ -154,9 +220,13 @@
#endif
+
+//------------------------------------------------------------------------------------------
// Architecture identification, EIGEN_ARCH_*
+//------------------------------------------------------------------------------------------
+
-#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
+#if defined(__x86_64__) || (defined(_M_X64) && !defined(_M_ARM64EC)) || defined(__amd64)
#define EIGEN_ARCH_x86_64 1
#else
#define EIGEN_ARCH_x86_64 0
@@ -182,18 +252,61 @@
#endif
/// \internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define EIGEN_ARCH_ARM64 1
#else
#define EIGEN_ARCH_ARM64 0
#endif
+/// \internal EIGEN_ARCH_ARM_OR_ARM64 set to 1 if the architecture is ARM or ARM64
#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64
#define EIGEN_ARCH_ARM_OR_ARM64 1
#else
#define EIGEN_ARCH_ARM_OR_ARM64 0
#endif
+/// \internal EIGEN_ARCH_ARMV8 set to 1 if the architecture is armv8 or greater.
+#if EIGEN_ARCH_ARM_OR_ARM64 && defined(__ARM_ARCH) && __ARM_ARCH >= 8
+#define EIGEN_ARCH_ARMV8 1
+#else
+#define EIGEN_ARCH_ARMV8 0
+#endif
+
+
+/// \internal EIGEN_HAS_ARM64_FP16 set to 1 if the architecture provides an IEEE
+/// compliant Arm fp16 type
+#if EIGEN_ARCH_ARM64
+ #ifndef EIGEN_HAS_ARM64_FP16
+ #if defined(__ARM_FP16_FORMAT_IEEE)
+ #define EIGEN_HAS_ARM64_FP16 1
+ #else
+ #define EIGEN_HAS_ARM64_FP16 0
+ #endif
+ #endif
+#endif
+
+/// \internal EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC set to 1 if the architecture
+/// supports Neon vector intrinsics for fp16.
+#if EIGEN_ARCH_ARM64
+ #ifndef EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ #define EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC 1
+ #else
+ #define EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC 0
+ #endif
+ #endif
+#endif
+
+/// \internal EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC set to 1 if the architecture
+/// supports Neon scalar intrinsics for fp16.
+#if EIGEN_ARCH_ARM64
+ #ifndef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC
+ #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
+ #define EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC 1
+ #endif
+ #endif
+#endif
+
/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS
#if defined(__mips__) || defined(__mips)
#define EIGEN_ARCH_MIPS 1
@@ -222,15 +335,11 @@
#define EIGEN_ARCH_PPC 0
#endif
-/// \internal EIGEN_ARCH_E2K set to 1 if the architecture is E2K (MCST Elbrus 2000)
-#if defined(__e2k__)
- #define EIGEN_ARCH_E2K 1
-#else
- #define EIGEN_ARCH_E2K 0
-#endif
+//------------------------------------------------------------------------------------------
// Operating system identification, EIGEN_OS_*
+//------------------------------------------------------------------------------------------
/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
#if defined(__unix__) || defined(__unix)
@@ -317,9 +426,17 @@
#define EIGEN_OS_WIN_STRICT 0
#endif
-/// \internal EIGEN_OS_SUN set to 1 if the OS is SUN
+/// \internal EIGEN_OS_SUN set to __SUNPRO_C if the OS is SUN
+// compiler solaris __SUNPRO_C
+// version studio
+// 5.7 10 0x570
+// 5.8 11 0x580
+// 5.9 12 0x590
+// 5.10 12.1 0x5100
+// 5.11 12.2 0x5110
+// 5.12 12.3 0x5120
#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))
- #define EIGEN_OS_SUN 1
+ #define EIGEN_OS_SUN __SUNPRO_C
#else
#define EIGEN_OS_SUN 0
#endif
@@ -332,25 +449,137 @@
#endif
-#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG
- // see bug 89
- #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
-#else
- #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
+//------------------------------------------------------------------------------------------
+// Detect GPU compilers and architectures
+//------------------------------------------------------------------------------------------
+
+// NVCC is not supported as the target platform for HIPCC
+// Note that this also makes EIGEN_CUDACC and EIGEN_HIPCC mutually exclusive
+#if defined(__NVCC__) && defined(__HIPCC__)
+ #error "NVCC as the target platform for HIPCC is currently not supported."
#endif
-// This macro can be used to prevent from macro expansion, e.g.:
-// std::max EIGEN_NOT_A_MACRO(a,b)
-#define EIGEN_NOT_A_MACRO
+#if defined(__CUDACC__) && !defined(EIGEN_NO_CUDA)
+ // Means the compiler is either nvcc or clang with CUDA enabled
+ #define EIGEN_CUDACC __CUDACC__
+#endif
-#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor
+#if defined(__CUDA_ARCH__) && !defined(EIGEN_NO_CUDA)
+ // Means we are generating code for the device
+ #define EIGEN_CUDA_ARCH __CUDA_ARCH__
+#endif
+
+#if defined(EIGEN_CUDACC)
+#include <cuda.h>
+ #define EIGEN_CUDA_SDK_VER (CUDA_VERSION * 10)
#else
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor
+ #define EIGEN_CUDA_SDK_VER 0
#endif
-#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
-#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#if defined(__HIPCC__) && !defined(EIGEN_NO_HIP)
+ // Means the compiler is HIPCC (analogous to EIGEN_CUDACC, but for HIP)
+ #define EIGEN_HIPCC __HIPCC__
+
+ // We need to include hip_runtime.h here because it pulls in
+ // ++ hip_common.h which contains the define for __HIP_DEVICE_COMPILE__
+ // ++ host_defines.h which contains the defines for the __host__ and __device__ macros
+ #include <hip/hip_runtime.h>
+
+ #if defined(__HIP_DEVICE_COMPILE__)
+ // analogous to EIGEN_CUDA_ARCH, but for HIP
+ #define EIGEN_HIP_DEVICE_COMPILE __HIP_DEVICE_COMPILE__
+ #endif
+
+ // For HIP (ROCm 3.5 and higher), we need to explicitly set the launch_bounds attribute
+ // value to 1024. The compiler assigns a default value of 256 when the attribute is not
+ // specified. This results in failures on the HIP platform, for cases when a GPU kernel
+ // without an explicit launch_bounds attribute is called with a threads_per_block value
+ // greater than 256.
+ //
+  // This is a regression in functionality and is expected to be fixed within the next
+  // couple of ROCm releases (the compiler will go back to using 1024 as the default).
+ //
+ // In the meantime, we will use a "only enabled for HIP" macro to set the launch_bounds
+ // attribute.
+
+ #define EIGEN_HIP_LAUNCH_BOUNDS_1024 __launch_bounds__(1024)
+
+#endif
+
+#if !defined(EIGEN_HIP_LAUNCH_BOUNDS_1024)
+#define EIGEN_HIP_LAUNCH_BOUNDS_1024
+#endif // !defined(EIGEN_HIP_LAUNCH_BOUNDS_1024)
+
+// Unify CUDA/HIPCC
+
+#if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC)
+//
+// If either EIGEN_CUDACC or EIGEN_HIPCC is defined, then define EIGEN_GPUCC
+//
+#define EIGEN_GPUCC
+//
+// EIGEN_HIPCC implies the HIP compiler and is used to tweak Eigen code for use in HIP kernels
+// EIGEN_CUDACC implies the CUDA compiler and is used to tweak Eigen code for use in CUDA kernels
+//
+// In most cases the same tweaks are required to the Eigen code to enable in both the HIP and CUDA kernels.
+// For those cases, the corresponding code should be guarded with
+// #if defined(EIGEN_GPUCC)
+// instead of
+// #if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC)
+//
+// For cases where the tweak is specific to HIP, the code should be guarded with
+// #if defined(EIGEN_HIPCC)
+//
+// For cases where the tweak is specific to CUDA, the code should be guarded with
+// #if defined(EIGEN_CUDACC)
+//
+#endif
+
+#if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIP_DEVICE_COMPILE)
+//
+// If either EIGEN_CUDA_ARCH or EIGEN_HIP_DEVICE_COMPILE is defined, then define EIGEN_GPU_COMPILE_PHASE
+//
+#define EIGEN_GPU_COMPILE_PHASE
+//
+// GPU compilers (HIPCC, NVCC) typically do two passes over the source code,
+// + one to compile the source for the "host" (i.e., CPU)
+// + another to compile the source for the "device" (i.e., GPU)
+//
+// Code that needs to be enabled only during either the "host" or the "device" compilation phase
+// needs to be guarded with a macro that indicates the current compilation phase.
+//
+// EIGEN_HIP_DEVICE_COMPILE implies the device compilation phase in HIP
+// EIGEN_CUDA_ARCH implies the device compilation phase in CUDA
+//
+// In most cases, the "host" / "device" specific code is the same for both HIP and CUDA
+// For those cases, the code should be guarded with
+// #if defined(EIGEN_GPU_COMPILE_PHASE)
+// instead of
+// #if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIP_DEVICE_COMPILE)
+//
+// For cases where the tweak is specific to HIP, the code should be guarded with
+// #if defined(EIGEN_HIP_DEVICE_COMPILE)
+//
+// For cases where the tweak is specific to CUDA, the code should be guarded with
+// #if defined(EIGEN_CUDA_ARCH)
+//
+#endif
+
+#if defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+// EIGEN_USE_SYCL is a user-defined macro while __SYCL_DEVICE_ONLY__ is a compiler-defined macro.
+// In most cases we want to check if both macros are defined which can be done using the define below.
+#define SYCL_DEVICE_ONLY
+#endif
+
+//------------------------------------------------------------------------------------------
+// Detect Compiler/Architecture/OS specific features
+//------------------------------------------------------------------------------------------
+
+#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG
+ // see bug 89
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
+#else
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
#endif
// Cross compiler wrapper around LLVM's __has_builtin
@@ -368,26 +597,67 @@
// Some old compilers do not support template specializations like:
// template<typename T,int N> void foo(const T x[N]);
-#if !( EIGEN_COMP_CLANG && ((EIGEN_COMP_CLANG<309) || defined(__apple_build_version__)) || EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<49)
+#if !( EIGEN_COMP_CLANG && ( (EIGEN_COMP_CLANG<309) \
+ || (defined(__apple_build_version__) && (__apple_build_version__ < 9000000))) \
+ || EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<49)
#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 1
#else
#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0
#endif
-// Upperbound on the C++ version to use.
-// Expected values are 03, 11, 14, 17, etc.
-// By default, let's use an arbitrarily large C++ version.
-#ifndef EIGEN_MAX_CPP_VER
-#define EIGEN_MAX_CPP_VER 99
+// The macro EIGEN_CPLUSPLUS is a replacement for __cplusplus/_MSVC_LANG that
+// works for both MSVC and non-MSVC compilers, indicating the C++ standard version number.
+//
+// With MSVC, without defining /Zc:__cplusplus, the __cplusplus macro will
+// report 199711L regardless of the language standard specified via /std.
+// We need to rely on _MSVC_LANG instead, which is only available starting
+// with VS2015 Update 3.
+#if EIGEN_COMP_MSVC_LANG > 0
+#define EIGEN_CPLUSPLUS EIGEN_COMP_MSVC_LANG
+#elif EIGEN_COMP_MSVC >= 1900
+#define EIGEN_CPLUSPLUS 201103L
+#elif defined(__cplusplus)
+#define EIGEN_CPLUSPLUS __cplusplus
+#else
+#define EIGEN_CPLUSPLUS 0
+#endif
+
+// The macro EIGEN_COMP_CXXVER defines the c++ version expected by the compiler.
+// For instance, if compiling with gcc and -std=c++17, then EIGEN_COMP_CXXVER
+// is defined to 17.
+#if EIGEN_CPLUSPLUS > 201703L
+ #define EIGEN_COMP_CXXVER 20
+#elif EIGEN_CPLUSPLUS > 201402L
+ #define EIGEN_COMP_CXXVER 17
+#elif EIGEN_CPLUSPLUS > 201103L
+ #define EIGEN_COMP_CXXVER 14
+#elif EIGEN_CPLUSPLUS >= 201103L
+ #define EIGEN_COMP_CXXVER 11
+#else
+ #define EIGEN_COMP_CXXVER 03
+#endif
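
To make the mapping concrete, a small self-check (assuming Eigen's headers are on the include path): compiling with g++ -std=c++14 should give EIGEN_CPLUSPLUS == 201402L and hence EIGEN_COMP_CXXVER == 14:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      // With g++ -std=c++14 this prints: 201402 -> 14
      std::cout << EIGEN_CPLUSPLUS << " -> " << EIGEN_COMP_CXXVER << "\n";
      return 0;
    }
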
+
+#ifndef EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
+ #if defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && EIGEN_MAX_CPP_VER>=14
+ #define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 1
+ #else
+ #define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 0
+ #endif
#endif
-#if EIGEN_MAX_CPP_VER>=11 && (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
+
+// The macros EIGEN_HAS_CXX?? define a rough estimate of the available c++ features,
+// but in practice we should not rely on them; rather, we should rely on the availability of
+// individual features as defined later.
+// This is why there is no EIGEN_HAS_CXX17.
+// FIXME: get rid of EIGEN_HAS_CXX14 and maybe even EIGEN_HAS_CXX11.
+#if EIGEN_MAX_CPP_VER>=11 && EIGEN_COMP_CXXVER>=11
#define EIGEN_HAS_CXX11 1
#else
#define EIGEN_HAS_CXX11 0
#endif
-#if EIGEN_MAX_CPP_VER>=14 && (defined(__cplusplus) && (__cplusplus > 201103L) || EIGEN_COMP_MSVC >= 1910)
+#if EIGEN_MAX_CPP_VER>=14 && EIGEN_COMP_CXXVER>=14
#define EIGEN_HAS_CXX14 1
#else
#define EIGEN_HAS_CXX14 0
@@ -397,8 +667,7 @@
#ifndef EIGEN_HAS_RVALUE_REFERENCES
#if EIGEN_MAX_CPP_VER>=11 && \
(__has_feature(cxx_rvalue_references) || \
- (defined(__cplusplus) && __cplusplus >= 201103L) || \
- (EIGEN_COMP_MSVC >= 1600))
+ (EIGEN_COMP_CXXVER >= 11) || (EIGEN_COMP_MSVC >= 1600))
#define EIGEN_HAS_RVALUE_REFERENCES 1
#else
#define EIGEN_HAS_RVALUE_REFERENCES 0
@@ -406,12 +675,14 @@
#endif
// Does the compiler support C99?
+// Need to include <cmath> to make sure _GLIBCXX_USE_C99 gets defined
+#include <cmath>
#ifndef EIGEN_HAS_C99_MATH
#if EIGEN_MAX_CPP_VER>=11 && \
((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)) \
|| (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \
|| (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) \
- || (EIGEN_COMP_MSVC >= 1900) || defined(__SYCL_DEVICE_ONLY__))
+ || (EIGEN_COMP_MSVC >= 1900) || defined(SYCL_DEVICE_ONLY))
#define EIGEN_HAS_C99_MATH 1
#else
#define EIGEN_HAS_C99_MATH 0
@@ -419,17 +690,57 @@
#endif
// Does the compiler support result_of?
+// result_of was deprecated in c++17 and removed in c++20
#ifndef EIGEN_HAS_STD_RESULT_OF
-#if EIGEN_MAX_CPP_VER>=11 && ((__has_feature(cxx_lambdas) || (defined(__cplusplus) && __cplusplus >= 201103L)))
+#if EIGEN_HAS_CXX11 && EIGEN_COMP_CXXVER < 17
#define EIGEN_HAS_STD_RESULT_OF 1
#else
#define EIGEN_HAS_STD_RESULT_OF 0
#endif
#endif
-// Does the compiler support type_trais?
+// Does the compiler support std::hash?
+#ifndef EIGEN_HAS_STD_HASH
+// The std::hash struct is defined in C++11 but is not labelled as a __device__
+// function and is not constexpr, so it cannot be used on device.
+#if EIGEN_HAS_CXX11 && !defined(EIGEN_GPU_COMPILE_PHASE)
+#define EIGEN_HAS_STD_HASH 1
+#else
+#define EIGEN_HAS_STD_HASH 0
+#endif
+#endif // EIGEN_HAS_STD_HASH
+
+#ifndef EIGEN_HAS_STD_INVOKE_RESULT
+#if EIGEN_MAX_CPP_VER >= 17 && EIGEN_COMP_CXXVER >= 17
+#define EIGEN_HAS_STD_INVOKE_RESULT 1
+#else
+#define EIGEN_HAS_STD_INVOKE_RESULT 0
+#endif
+#endif
+
+#ifndef EIGEN_HAS_ALIGNAS
+#if EIGEN_MAX_CPP_VER>=11 && EIGEN_HAS_CXX11 && \
+ ( __has_feature(cxx_alignas) \
+ || EIGEN_HAS_CXX14 \
+ || (EIGEN_COMP_MSVC >= 1800) \
+ || (EIGEN_GNUC_AT_LEAST(4,8)) \
+ || (EIGEN_COMP_CLANG>=305) \
+ || (EIGEN_COMP_ICC>=1500) \
+ || (EIGEN_COMP_PGI>=1500) \
+ || (EIGEN_COMP_SUNCC>=0x5130))
+#define EIGEN_HAS_ALIGNAS 1
+#else
+#define EIGEN_HAS_ALIGNAS 0
+#endif
+#endif
+
+// Does the compiler support type_traits?
+// - full support of type traits was added only in GCC 5.1.0.
+// - 20150626 corresponds to the last release of 4.x libstdc++
#ifndef EIGEN_HAS_TYPE_TRAITS
-#if EIGEN_MAX_CPP_VER>=11 && (EIGEN_HAS_CXX11 || EIGEN_COMP_MSVC >= 1700)
+#if EIGEN_MAX_CPP_VER>=11 && (EIGEN_HAS_CXX11 || EIGEN_COMP_MSVC >= 1700) \
+ && ((!EIGEN_COMP_GNUC_STRICT) || EIGEN_GNUC_AT_LEAST(5, 1)) \
+ && ((!defined(__GLIBCXX__)) || __GLIBCXX__ > 20150626)
#define EIGEN_HAS_TYPE_TRAITS 1
#define EIGEN_INCLUDE_TYPE_TRAITS
#else
@@ -439,12 +750,12 @@
// Does the compiler support variadic templates?
#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
-#if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \
- && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_CUDACC_VER >= 80000) )
+#if EIGEN_MAX_CPP_VER>=11 && (EIGEN_COMP_CXXVER >= 11) \
+ && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_COMP_NVCC >= 80000) )
// ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
// this prevents nvcc from crashing when compiling Eigen on Tegra X1
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
-#elif EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) && defined(__SYCL_DEVICE_ONLY__)
+#elif EIGEN_MAX_CPP_VER>=11 && (EIGEN_COMP_CXXVER >= 11) && defined(SYCL_DEVICE_ONLY)
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
#else
#define EIGEN_HAS_VARIADIC_TEMPLATES 0
@@ -453,28 +764,33 @@
// Does the compiler fully support const expressions? (as in c++14)
#ifndef EIGEN_HAS_CONSTEXPR
+ #if defined(EIGEN_CUDACC)
+ // Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
+ #if EIGEN_MAX_CPP_VER>=14 && (EIGEN_COMP_CXXVER >= 11 && (EIGEN_COMP_CLANG || EIGEN_COMP_NVCC >= 70500))
+ #define EIGEN_HAS_CONSTEXPR 1
+ #endif
+ #elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (EIGEN_COMP_CXXVER >= 14) || \
+ (EIGEN_GNUC_AT_LEAST(4,8) && (EIGEN_COMP_CXXVER >= 11)) || \
+ (EIGEN_COMP_CLANG >= 306 && (EIGEN_COMP_CXXVER >= 11)))
+ #define EIGEN_HAS_CONSTEXPR 1
+ #endif
-#if defined(EIGEN_CUDACC)
-// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
-#if EIGEN_MAX_CPP_VER>=14 && (__cplusplus > 199711L && (EIGEN_COMP_CLANG || EIGEN_CUDACC_VER >= 70500))
- #define EIGEN_HAS_CONSTEXPR 1
-#endif
-#elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
- (EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L)) || \
- (EIGEN_COMP_CLANG >= 306 && (__cplusplus > 199711L)))
-#define EIGEN_HAS_CONSTEXPR 1
-#endif
+ #ifndef EIGEN_HAS_CONSTEXPR
+ #define EIGEN_HAS_CONSTEXPR 0
+ #endif
-#ifndef EIGEN_HAS_CONSTEXPR
-#define EIGEN_HAS_CONSTEXPR 0
-#endif
+#endif // EIGEN_HAS_CONSTEXPR
+#if EIGEN_HAS_CONSTEXPR
+#define EIGEN_CONSTEXPR constexpr
+#else
+#define EIGEN_CONSTEXPR
#endif
// Does the compiler support C++11 math?
// Let's be conservative and enable the default C++11 implementation only if we are sure it exists
#ifndef EIGEN_HAS_CXX11_MATH
- #if EIGEN_MAX_CPP_VER>=11 && ((__cplusplus > 201103L) || (__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC) \
+ #if EIGEN_MAX_CPP_VER>=11 && ((EIGEN_COMP_CXXVER > 11) || (EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC) \
&& (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
#define EIGEN_HAS_CXX11_MATH 1
#else
@@ -485,9 +801,8 @@
// Does the compiler support proper C++11 containers?
#ifndef EIGEN_HAS_CXX11_CONTAINERS
#if EIGEN_MAX_CPP_VER>=11 && \
- ((__cplusplus > 201103L) \
- || ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \
- || EIGEN_COMP_MSVC >= 1900)
+ ((EIGEN_COMP_CXXVER > 11) \
+ || ((EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC>=1400)))
#define EIGEN_HAS_CXX11_CONTAINERS 1
#else
#define EIGEN_HAS_CXX11_CONTAINERS 0
@@ -498,24 +813,88 @@
#ifndef EIGEN_HAS_CXX11_NOEXCEPT
#if EIGEN_MAX_CPP_VER>=11 && \
(__has_feature(cxx_noexcept) \
- || (__cplusplus > 201103L) \
- || ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \
- || EIGEN_COMP_MSVC >= 1900)
+ || (EIGEN_COMP_CXXVER > 11) \
+ || ((EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC>=1400)))
#define EIGEN_HAS_CXX11_NOEXCEPT 1
#else
#define EIGEN_HAS_CXX11_NOEXCEPT 0
#endif
#endif
-/** Allows to disable some optimizations which might affect the accuracy of the result.
- * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
- * They currently include:
- * - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.
- */
-#ifndef EIGEN_FAST_MATH
-#define EIGEN_FAST_MATH 1
+#ifndef EIGEN_HAS_CXX11_ATOMIC
+ #if EIGEN_MAX_CPP_VER>=11 && \
+ (__has_feature(cxx_atomic) \
+ || (EIGEN_COMP_CXXVER > 11) \
+ || ((EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_MSVC==0 || EIGEN_COMP_MSVC >= 1700)))
+ #define EIGEN_HAS_CXX11_ATOMIC 1
+ #else
+ #define EIGEN_HAS_CXX11_ATOMIC 0
+ #endif
+#endif
+
+#ifndef EIGEN_HAS_CXX11_OVERRIDE_FINAL
+ #if EIGEN_MAX_CPP_VER>=11 && \
+ (EIGEN_COMP_CXXVER >= 11 || EIGEN_COMP_MSVC >= 1700)
+ #define EIGEN_HAS_CXX11_OVERRIDE_FINAL 1
+ #else
+ #define EIGEN_HAS_CXX11_OVERRIDE_FINAL 0
+ #endif
+#endif
+
+// NOTE: the required Apple clang version is very conservative,
+// and it could be that XCode 9 works just fine.
+// NOTE: the MSVC version is based on https://en.cppreference.com/w/cpp/compiler_support
+// and has not been tested.
+#ifndef EIGEN_HAS_CXX17_OVERALIGN
+#if EIGEN_MAX_CPP_VER>=17 && EIGEN_COMP_CXXVER>=17 && ( \
+ (EIGEN_COMP_MSVC >= 1912) \
+ || (EIGEN_GNUC_AT_LEAST(7,0)) \
+ || ((!defined(__apple_build_version__)) && (EIGEN_COMP_CLANG>=500)) \
+ || (( defined(__apple_build_version__)) && (__apple_build_version__>=10000000)) \
+ )
+#define EIGEN_HAS_CXX17_OVERALIGN 1
+#else
+#define EIGEN_HAS_CXX17_OVERALIGN 0
+#endif
+#endif
+
+#if defined(EIGEN_CUDACC) && EIGEN_HAS_CONSTEXPR
+  // While already available with c++11, this is mostly useful starting with c++14 and its relaxed constexpr rules
+ #if defined(__NVCC__)
+ // nvcc considers constexpr functions as __host__ __device__ with the option --expt-relaxed-constexpr
+ #ifdef __CUDACC_RELAXED_CONSTEXPR__
+ #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC
+ #endif
+ #elif defined(__clang__) && defined(__CUDA__) && __has_feature(cxx_relaxed_constexpr)
+ // clang++ always considers constexpr functions as implicitly __host__ __device__
+ #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC
+ #endif
+#endif
+
+// Does the compiler support the __int128_t and __uint128_t extensions for 128-bit
+// integer arithmetic?
+//
+// Clang and GCC define __SIZEOF_INT128__ when these extensions are supported,
+// but we avoid using them in certain cases:
+//
+// * Building using Clang for Windows, where the Clang runtime library has
+// 128-bit support only on LP64 architectures, but Windows is LLP64.
+#ifndef EIGEN_HAS_BUILTIN_INT128
+#if defined(__SIZEOF_INT128__) && !(EIGEN_OS_WIN && EIGEN_COMP_CLANG)
+#define EIGEN_HAS_BUILTIN_INT128 1
+#else
+#define EIGEN_HAS_BUILTIN_INT128 0
+#endif
#endif
+//------------------------------------------------------------------------------------------
+// Preprocessor programming helpers
+//------------------------------------------------------------------------------------------
+
+// This macro can be used to prevent macro expansion, e.g.:
+// std::max EIGEN_NOT_A_MACRO(a,b)
+#define EIGEN_NOT_A_MACRO
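
The trick works because a function-like macro only expands when its name is directly followed by a parenthesis; a token that expands to nothing breaks that adjacency. A hedged sketch (the hostile max macro stands in for the ones <windows.h> famously defines):

    #include <algorithm>

    #define max(a,b) ((a)>(b)?(a):(b))  // simulates a global function-like macro
    #define EIGEN_NOT_A_MACRO

    int largest(int x, int y) {
      // Plain "std::max(x, y)" would be mangled by the max macro above.
      return std::max EIGEN_NOT_A_MACRO (x, y);
    }
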
+
#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
// concatenate two tokens
@@ -531,11 +910,13 @@
// EIGEN_STRONG_INLINE is a stronger version of inline, using __forceinline on MSVC,
// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
// but GCC is still doing fine with just inline.
-#if EIGEN_COMP_MSVC || EIGEN_COMP_ICC
+#ifndef EIGEN_STRONG_INLINE
+#if (EIGEN_COMP_MSVC || EIGEN_COMP_ICC) && !defined(EIGEN_GPUCC)
#define EIGEN_STRONG_INLINE __forceinline
#else
#define EIGEN_STRONG_INLINE inline
#endif
+#endif
// EIGEN_ALWAYS_INLINE is the strongest: it has the effect of making the function inline and adding every possible
// attribute to maximize inlining. This should only be used when really necessary: in particular,
@@ -545,7 +926,7 @@
// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
// : function body not available
// See also bug 1367
-#if EIGEN_GNUC_AT_LEAST(4,2)
+#if EIGEN_GNUC_AT_LEAST(4,2) && !defined(SYCL_DEVICE_ONLY)
#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
#else
#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
@@ -565,6 +946,37 @@
#define EIGEN_PERMISSIVE_EXPR
#endif
+// GPU stuff
+
+// Disable some features when compiling with GPU compilers (NVCC/clang-cuda/SYCL/HIPCC)
+#if defined(EIGEN_CUDACC) || defined(SYCL_DEVICE_ONLY) || defined(EIGEN_HIPCC)
+ // Do not try asserts on device code
+ #ifndef EIGEN_NO_DEBUG
+ #define EIGEN_NO_DEBUG
+ #endif
+
+ #ifdef EIGEN_INTERNAL_DEBUGGING
+ #undef EIGEN_INTERNAL_DEBUGGING
+ #endif
+
+ #ifdef EIGEN_EXCEPTIONS
+ #undef EIGEN_EXCEPTIONS
+ #endif
+#endif
+
+#if defined(SYCL_DEVICE_ONLY)
+ #ifndef EIGEN_DONT_VECTORIZE
+ #define EIGEN_DONT_VECTORIZE
+ #endif
+ #define EIGEN_DEVICE_FUNC __attribute__((flatten)) __attribute__((always_inline))
+// All functions callable from CUDA/HIP code must be qualified with __device__
+#elif defined(EIGEN_GPUCC)
+ #define EIGEN_DEVICE_FUNC __host__ __device__
+#else
+ #define EIGEN_DEVICE_FUNC
+#endif
+
+
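A hedged usage sketch of the macro just defined (the helper itself is hypothetical): under nvcc/hipcc the qualifier makes the function callable from both host and device code, under SYCL it becomes an inlining hint, and in plain CPU builds it expands to nothing:

    EIGEN_DEVICE_FUNC inline float lerp(float a, float b, float t) {
      return a + t * (b - a);  // usable in kernels and host code alike
    }
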
// this macro allows one to get rid of linking errors about multiply defined functions.
// - static is not very good because it prevents definitions from different object files from being merged.
// So static causes the resulting linked executable to be bloated with multiple copies of the same function.
@@ -580,7 +992,11 @@
// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
#ifdef EIGEN_NO_DEBUG
- #define eigen_plain_assert(x)
+ #ifdef SYCL_DEVICE_ONLY // used to silence the warning on SYCL device
+ #define eigen_plain_assert(x) EIGEN_UNUSED_VARIABLE(x)
+ #else
+ #define eigen_plain_assert(x)
+ #endif
#else
#if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
namespace Eigen {
@@ -654,7 +1070,7 @@
// Suppresses 'unused variable' warnings.
namespace Eigen {
namespace internal {
- template<typename T> EIGEN_DEVICE_FUNC void ignore_unused_variable(const T&) {}
+ template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ignore_unused_variable(const T&) {}
}
}
#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
@@ -668,177 +1084,84 @@ namespace Eigen {
#endif
-#if EIGEN_COMP_MSVC
- // NOTE MSVC often gives C4127 warnings with compiletime if statements. See bug 1362.
- // This workaround is ugly, but it does the job.
-# define EIGEN_CONST_CONDITIONAL(cond) (void)0, cond
-#else
-# define EIGEN_CONST_CONDITIONAL(cond) cond
-#endif
-
-//------------------------------------------------------------------------------------------
-// Static and dynamic alignment control
-//
-// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES
-// as the maximal boundary in bytes on which dynamically and statically allocated data may be alignment respectively.
-// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,
-// a default value is automatically computed based on architecture, compiler, and OS.
+// Acts as a barrier preventing operations involving `X` from crossing it. This
+// occurs, for example, in the fast rounding trick where a magic constant is
+// added then subtracted, which is otherwise compiled away with -ffast-math.
//
-// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}
-// to be used to declare statically aligned buffers.
-//------------------------------------------------------------------------------------------
-
-
-/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
- * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
- * so that vectorization doesn't affect binary compatibility.
- *
- * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
- * vectorized and non-vectorized code.
- */
-#if (defined EIGEN_CUDACC)
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
-#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-#elif EIGEN_COMP_MSVC
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
-#elif EIGEN_COMP_SUNCC
- // FIXME not sure about this one:
- #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-#else
- #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
-#endif
-
-// If the user explicitly disable vectorization, then we also disable alignment
-#if defined(EIGEN_DONT_VECTORIZE)
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
-#elif defined(EIGEN_VECTORIZE_AVX512)
- // 64 bytes static alignmeent is preferred only if really required
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
-#elif defined(__AVX__)
- // 32 bytes static alignmeent is preferred only if really required
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32
-#else
- #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
-#endif
-
-
-// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense
-#define EIGEN_MIN_ALIGN_BYTES 16
-
-// Defined the boundary (in bytes) on which the data needs to be aligned. Note
-// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be
-// aligned at all regardless of the value of this #define.
-
-#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0
-#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
-#endif
-
-// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprectated
-// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
-#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
- #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
- #undef EIGEN_MAX_STATIC_ALIGN_BYTES
- #endif
- #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
-#endif
-
-#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES
-
- // Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES
-
- // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
- // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
- // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
- // certain common platform (compiler+architecture combinations) to avoid these problems.
- // Only static alignment is really problematic (relies on nonstandard compiler extensions),
- // try to keep heap alignment even when we have to disable static alignment.
- #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || EIGEN_ARCH_E2K)
- #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
- #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)
- // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.
- // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
- // 4.8 and newer seem definitely unaffected.
- #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
- #else
- #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
- #endif
-
- // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
- #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
- && !EIGEN_GCC3_OR_OLDER \
- && !EIGEN_COMP_SUNCC \
- && !EIGEN_OS_QNX
- #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
- #else
- #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
- #endif
-
- #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
- #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+// See bug 1674
+#if !defined(EIGEN_OPTIMIZATION_BARRIER)
+ #if EIGEN_COMP_GNUC
+ // According to https://gcc.gnu.org/onlinedocs/gcc/Constraints.html:
+ // X: Any operand whatsoever.
+ // r: A register operand is allowed provided that it is in a general
+ // register.
+ // g: Any register, memory or immediate integer operand is allowed, except
+ // for registers that are not general registers.
+ // w: (AArch32/AArch64) Floating point register, Advanced SIMD vector
+ // register or SVE vector register.
+ // x: (SSE) Any SSE register.
+ // (AArch64) Like w, but restricted to registers 0 to 15 inclusive.
+ // v: (PowerPC) An Altivec vector register.
+  //  wa: (PowerPC) A VSX register.
+ //
+ // "X" (uppercase) should work for all cases, though this seems to fail for
+ // some versions of GCC for arm/aarch64 with
+ // "error: inconsistent operand constraints in an 'asm'"
+ // Clang x86_64/arm/aarch64 seems to require "g" to support both scalars and
+ // vectors, otherwise
+ // "error: non-trivial scalar-to-vector conversion, possible invalid
+ // constraint for vector type"
+ //
+ // GCC for ppc64le generates an internal compiler error with x/X/g.
+ // GCC for AVX generates an internal compiler error with X.
+ //
+ // Tested on icc/gcc/clang for sse, avx, avx2, avx512dq
+ // gcc for arm, aarch64,
+ // gcc for ppc64le,
+ // both vectors and scalars.
+ //
+ // Note that this is restricted to plain types - this will not work
+ // directly for std::complex<T>, Eigen::half, Eigen::bfloat16. For these,
+  // you will need to apply it to the underlying POD type.
+ #if EIGEN_ARCH_PPC && EIGEN_COMP_GNUC_STRICT
+ // This seems to be broken on clang. Packet4f is loaded into a single
+ // register rather than a vector, zeroing out some entries. Integer
+ // types also generate a compile error.
+ // General, Altivec, VSX.
+ #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+r,v,wa" (X));
+ #elif EIGEN_ARCH_ARM_OR_ARM64
+ // General, NEON.
+ // Clang doesn't like "r",
+ // error: non-trivial scalar-to-vector conversion, possible invalid
+ // constraint for vector type
+ // GCC < 5 doesn't like "g",
+ // error: 'asm' operand requires impossible reload
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(5, 0)
+ #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+r,w" (X));
+ #else
+ #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+g,w" (X));
+ #endif
+ #elif EIGEN_ARCH_i386_OR_x86_64
+ // General, SSE.
+ #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+g,x" (X));
+ #else
+ // Not implemented for other architectures.
+ #define EIGEN_OPTIMIZATION_BARRIER(X)
+ #endif
#else
- #define EIGEN_MAX_STATIC_ALIGN_BYTES 0
+ // Not implemented for other compilers.
+ #define EIGEN_OPTIMIZATION_BARRIER(X)
#endif
-
-#endif
-
-// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_ALIGN_BYTES
-#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES
-#undef EIGEN_MAX_STATIC_ALIGN_BYTES
-#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
-#endif
-
-#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
- #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
-#endif
-
-// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
-// It takes into account both the user choice to explicitly enable/disable alignment (by settting EIGEN_MAX_STATIC_ALIGN_BYTES)
-// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
-// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
-
-
-// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY
-#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
-#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
-#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
-#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)
-#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
-#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)
-#else
-#define EIGEN_ALIGN_MAX
-#endif
-
-
-// Dynamic alignment control
-
-#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0
-#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.
#endif
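
For context, a sketch of the fast rounding trick the comment above refers to, using the barrier macro just defined and assuming x86-64 and an input in [0, 2^23) (the helper is illustrative, not Eigen code; note the barrier definitions already carry their own trailing semicolon):

    static inline float fast_round(float x) {
      const float magic = 8388608.0f;  // 2^23: adding it rounds x to an integer
      float y = x + magic;
      EIGEN_OPTIMIZATION_BARRIER(y)    // keeps -ffast-math from folding the pair away
      return y - magic;
    }
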
-#ifdef EIGEN_DONT_ALIGN
- #ifdef EIGEN_MAX_ALIGN_BYTES
- #undef EIGEN_MAX_ALIGN_BYTES
- #endif
- #define EIGEN_MAX_ALIGN_BYTES 0
-#elif !defined(EIGEN_MAX_ALIGN_BYTES)
- #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
-#endif
-
-#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES
-#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
+#if EIGEN_COMP_MSVC
+  // NOTE MSVC often gives C4127 warnings with compile-time if statements. See bug 1362.
+ // This workaround is ugly, but it does the job.
+# define EIGEN_CONST_CONDITIONAL(cond) (void)0, cond
#else
-#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
-#endif
-
-
-#ifndef EIGEN_UNALIGNED_VECTORIZE
-#define EIGEN_UNALIGNED_VECTORIZE 1
+# define EIGEN_CONST_CONDITIONAL(cond) cond
#endif
-//----------------------------------------------------------------------
-
-
#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
#define EIGEN_RESTRICT
#endif
@@ -846,10 +1169,6 @@ namespace Eigen {
#define EIGEN_RESTRICT __restrict
#endif
-#ifndef EIGEN_STACK_ALLOCATION_LIMIT
-// 131072 == 128 KB
-#define EIGEN_STACK_ALLOCATION_LIMIT 131072
-#endif
#ifndef EIGEN_DEFAULT_IO_FORMAT
#ifdef EIGEN_MAKING_DOCS
@@ -864,8 +1183,23 @@ namespace Eigen {
// just an empty macro !
#define EIGEN_EMPTY
-#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || EIGEN_CUDACC_VER>0)
- // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)
+
+// When compiling CUDA/HIP device code with NVCC or HIPCC
+// pull in math functions from the global namespace.
+// In host mode, and when device code is compiled with clang,
+// use the std versions.
+#if (defined(EIGEN_CUDA_ARCH) && defined(__NVCC__)) || defined(EIGEN_HIP_DEVICE_COMPILE)
+ #define EIGEN_USING_STD(FUNC) using ::FUNC;
+#else
+ #define EIGEN_USING_STD(FUNC) using std::FUNC;
+#endif
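
A hedged sketch of the intended use (the function name is hypothetical): in device code compiled by nvcc/hipcc only the global ::memcpy is usable, while host builds should keep std::memcpy, and the macro selects the right one:

    #include <cstddef>
    #include <cstring>
    #include <Eigen/Core>  // for EIGEN_DEVICE_FUNC and EIGEN_USING_STD

    EIGEN_DEVICE_FUNC inline void copy_floats(float* dst, const float* src, std::size_t n) {
      EIGEN_USING_STD(memcpy)  // ::memcpy on NVCC/HIPCC device passes, std::memcpy elsewhere
      memcpy(dst, src, n * sizeof(float));
    }
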
+
+#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || (EIGEN_COMP_MSVC == 1900 && EIGEN_COMP_NVCC))
+  // For older MSVC versions, as well as 1900 with CUDA 8, using the base operator is necessary;
+  // otherwise we get duplicate definition errors.
+  // For later MSVC versions, we require an explicit operator= definition; otherwise we get
+  // "use of implicitly deleted operator" errors.
+  // (cf Bugs 920, 1000, 1324, 2291)
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
@@ -885,11 +1219,48 @@ namespace Eigen {
#endif
+/**
+ * \internal
+ * \brief Macro to explicitly define the default copy constructor.
+ * This is necessary, because the implicit definition is deprecated if the copy-assignment is overridden.
+ */
+#if EIGEN_HAS_CXX11
+#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) EIGEN_DEVICE_FUNC CLASS(const CLASS&) = default;
+#else
+#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS)
+#endif
+
+
+
/** \internal
* \brief Macro to manually inherit assignment operators.
* This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
+ * With C++11 or later this also default-implements the copy-constructor
+ */
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+ EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived)
+
+/** \internal
+ * \brief Macro to manually define default constructors and destructors.
+ * This is necessary when the copy constructor is re-defined.
+ * For empty helper classes this should usually be protected, to avoid accidentally creating empty objects.
+ *
+ * Hiding the default destructor leads to problems in C++03 mode together with boost::multiprecision
*/
-#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+#if EIGEN_HAS_CXX11
+#define EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(Derived) \
+ EIGEN_DEVICE_FUNC Derived() = default; \
+ EIGEN_DEVICE_FUNC ~Derived() = default;
+#else
+#define EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(Derived) \
+ EIGEN_DEVICE_FUNC Derived() {}; \
+ /* EIGEN_DEVICE_FUNC ~Derived() {}; */
+#endif
+
+
+
+
/**
* Just a side note. Commenting within defines works only by documenting
@@ -952,6 +1323,14 @@ namespace Eigen {
#define EIGEN_IMPLIES(a,b) (!(a) || (b))
+#if EIGEN_HAS_BUILTIN(__builtin_expect) || EIGEN_COMP_GNUC
+#define EIGEN_PREDICT_FALSE(x) (__builtin_expect(x, false))
+#define EIGEN_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
+#else
+#define EIGEN_PREDICT_FALSE(x) (x)
+#define EIGEN_PREDICT_TRUE(x) (x)
+#endif
+
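Usage sketch (hypothetical helper): marking the error branch with EIGEN_PREDICT_FALSE lets GCC/Clang lay out the common case as the straight-line path:

    inline float checked_at(const float* data, long i, long size) {
      if (EIGEN_PREDICT_FALSE(i < 0 || i >= size))
        return 0.0f;   // cold path: out of range
      return data[i];  // hot path
    }
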
// the expression type of a standard coefficient wise binary operation
#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \
CwiseBinaryOp< \
@@ -990,7 +1369,7 @@ namespace Eigen {
#endif
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \
- template <typename T> EIGEN_DEVICE_FUNC inline \
+ template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME))\
(METHOD)(const T& scalar) const { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type PromotedT; \
@@ -999,7 +1378,7 @@ namespace Eigen {
}
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \
- template <typename T> EIGEN_DEVICE_FUNC inline friend \
+ template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME)) \
(METHOD)(const T& scalar, const StorageBaseType& matrix) { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type PromotedT; \
@@ -1012,15 +1391,23 @@ namespace Eigen {
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME)
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_CUDA_ARCH) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL) && !defined(EIGEN_HIP_DEVICE_COMPILE)
+ #define EIGEN_EXCEPTIONS
+#endif
+
+
#ifdef EIGEN_EXCEPTIONS
# define EIGEN_THROW_X(X) throw X
# define EIGEN_THROW throw
# define EIGEN_TRY try
# define EIGEN_CATCH(X) catch (X)
#else
-# ifdef EIGEN_CUDA_ARCH
+# if defined(EIGEN_CUDA_ARCH)
# define EIGEN_THROW_X(X) asm("trap;")
# define EIGEN_THROW asm("trap;")
+# elif defined(EIGEN_HIP_DEVICE_COMPILE)
+# define EIGEN_THROW_X(X) asm("s_trap 0")
+# define EIGEN_THROW asm("s_trap 0")
# else
# define EIGEN_THROW_X(X) std::abort()
# define EIGEN_THROW std::abort()
@@ -1040,7 +1427,47 @@ namespace Eigen {
# define EIGEN_NOEXCEPT
# define EIGEN_NOEXCEPT_IF(x)
# define EIGEN_NO_THROW throw()
-# define EIGEN_EXCEPTION_SPEC(X) throw(X)
+# if EIGEN_COMP_MSVC || EIGEN_COMP_CXXVER>=17
+ // MSVC does not support exception specifications (warning C4290),
+ // and they are deprecated in c++11 anyway. This is even an error in c++17.
+# define EIGEN_EXCEPTION_SPEC(X) throw()
+# else
+# define EIGEN_EXCEPTION_SPEC(X) throw(X)
+# endif
+#endif
+
+#if EIGEN_HAS_VARIADIC_TEMPLATES
+// The all function is used to enable a variadic version of eigen_assert which can take a parameter pack as its input.
+namespace Eigen {
+namespace internal {
+
+inline bool all(){ return true; }
+
+template<typename T, typename ...Ts>
+bool all(T t, Ts ... ts){ return t && all(ts...); }
+
+}
+}
+#endif
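
A hedged sketch of the intended use (MY_ASSERT_ALL is illustrative; the real eigen_assert wiring is outside this hunk):

    #include <cassert>
    #include <Eigen/Core>  // provides Eigen::internal::all when variadic templates are available

    #define MY_ASSERT_ALL(...) assert(Eigen::internal::all(__VA_ARGS__))

    void check_shape(int rows, int cols) {
      // Fails if any single condition in the parameter pack is false.
      MY_ASSERT_ALL(rows > 0, cols > 0, rows <= 4096, cols <= 4096);
    }
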
+
+#if EIGEN_HAS_CXX11_OVERRIDE_FINAL
+// provide override and final specifiers if they are available:
+# define EIGEN_OVERRIDE override
+# define EIGEN_FINAL final
+#else
+# define EIGEN_OVERRIDE
+# define EIGEN_FINAL
+#endif
+
+// Wrapping #pragma unroll in a macro since it is required for SYCL
+#if defined(SYCL_DEVICE_ONLY)
+ #if defined(_MSC_VER)
+ #define EIGEN_UNROLL_LOOP __pragma(unroll)
+ #else
+ #define EIGEN_UNROLL_LOOP _Pragma("unroll")
+ #endif
+#else
+ #define EIGEN_UNROLL_LOOP
#endif
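
Usage sketch (hypothetical kernel): the macro goes directly above the loop, so under SYCL device compilation the pragma requests unrolling of the fixed trip count, and elsewhere it expands to nothing:

    inline float dot4(const float* a, const float* b) {
      float acc = 0.0f;
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < 4; ++i)
        acc += a[i] * b[i];
      return acc;
    }
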
#endif // EIGEN_MACROS_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/Memory.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/Memory.h
index c455f92a1..875318cdb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/Memory.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/Memory.h
@@ -70,7 +70,21 @@ inline void throw_std_bad_alloc()
throw std::bad_alloc();
#else
std::size_t huge = static_cast<std::size_t>(-1);
+ #if defined(EIGEN_HIPCC)
+ //
+  // calls to "::operator new" are to be treated as opaque function calls (i.e. no inlining),
+  // and as a consequence the code in the #else block triggers the hipcc warning:
+ // "no overloaded function has restriction specifiers that are compatible with the ambient context"
+ //
+ // "throw_std_bad_alloc" has the EIGEN_DEVICE_FUNC attribute, so it seems that hipcc expects
+ // the same on "operator new"
+ // Reverting code back to the old version in this #if block for the hipcc compiler
+ //
new int[huge];
+ #else
+ void* unused = ::operator new(huge);
+ EIGEN_UNUSED_VARIABLE(unused);
+ #endif
#endif
}
@@ -83,19 +97,26 @@ inline void throw_std_bad_alloc()
/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
* Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
*/
-inline void* handmade_aligned_malloc(std::size_t size)
+EIGEN_DEVICE_FUNC inline void* handmade_aligned_malloc(std::size_t size, std::size_t alignment = EIGEN_DEFAULT_ALIGN_BYTES)
{
- void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
+ eigen_assert(alignment >= sizeof(void*) && (alignment & (alignment-1)) == 0 && "Alignment must be at least sizeof(void*) and a power of 2");
+
+ EIGEN_USING_STD(malloc)
+ void *original = malloc(size+alignment);
+
if (original == 0) return 0;
- void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(alignment-1))) + alignment);
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
}
/** \internal Frees memory allocated with handmade_aligned_malloc */
-inline void handmade_aligned_free(void *ptr)
+EIGEN_DEVICE_FUNC inline void handmade_aligned_free(void *ptr)
{
- if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
+ if (ptr) {
+ EIGEN_USING_STD(free)
+ free(*(reinterpret_cast<void**>(ptr) - 1));
+ }
}
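
The scheme above over-allocates by alignment bytes, rounds the raw pointer up to the next alignment boundary, and stashes the raw pointer in the slot just below the address it returns so that the free path can recover it; since alignment >= sizeof(void*) and malloc itself returns suitably aligned storage, that slot always exists. A standalone sketch of the same arithmetic, assuming only the standard library (not the Eigen code itself):

    #include <cstdint>
    #include <cstdlib>

    void* toy_aligned_malloc(std::size_t size, std::size_t alignment) {
      void* original = std::malloc(size + alignment);       // headroom for shifting
      if (original == 0) return 0;
      std::uintptr_t p = reinterpret_cast<std::uintptr_t>(original);
      void* aligned = reinterpret_cast<void*>((p & ~(alignment - 1)) + alignment);
      *(reinterpret_cast<void**>(aligned) - 1) = original;  // stash the raw pointer
      return aligned;
    }

    void toy_aligned_free(void* ptr) {
      if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
    }
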
/** \internal
@@ -156,9 +177,12 @@ EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
- result = std::malloc(size);
+
+ EIGEN_USING_STD(malloc)
+ result = malloc(size);
+
#if EIGEN_DEFAULT_ALIGN_BYTES==16
- eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator.");
+  eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fall back to the handmade aligned memory allocator.");
#endif
#else
result = handmade_aligned_malloc(size);
@@ -174,7 +198,10 @@ EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)
{
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
- std::free(ptr);
+
+ EIGEN_USING_STD(free)
+ free(ptr);
+
#else
handmade_aligned_free(ptr);
#endif
@@ -187,7 +214,7 @@ EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)
*/
inline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size)
{
- EIGEN_UNUSED_VARIABLE(old_size);
+ EIGEN_UNUSED_VARIABLE(old_size)
void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
@@ -218,7 +245,9 @@ template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std:
{
check_that_malloc_is_allowed();
- void *result = std::malloc(size);
+ EIGEN_USING_STD(malloc)
+ void *result = malloc(size);
+
if(!result && size)
throw_std_bad_alloc();
return result;
@@ -232,7 +261,8 @@ template<bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void
template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void *ptr)
{
- std::free(ptr);
+ EIGEN_USING_STD(free)
+ free(ptr);
}
template<bool Align> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size)
@@ -331,7 +361,7 @@ template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned
template<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size)
{
destruct_elements_of_array<T>(ptr, size);
- aligned_free(ptr);
+ Eigen::internal::aligned_free(ptr);
}
/** \internal Deletes objects constructed with conditional_aligned_new
@@ -493,7 +523,8 @@ template<typename T> struct smart_copy_helper<T,true> {
IntPtr size = IntPtr(end)-IntPtr(start);
if(size==0) return;
eigen_internal_assert(start!=0 && end!=0 && target!=0);
- std::memcpy(target, start, size);
+ EIGEN_USING_STD(memcpy)
+ memcpy(target, start, size);
}
};
@@ -535,6 +566,17 @@ template<typename T> struct smart_memmove_helper<T,false> {
}
};
+#if EIGEN_HAS_RVALUE_REFERENCES
+template<typename T> EIGEN_DEVICE_FUNC T* smart_move(T* start, T* end, T* target)
+{
+ return std::move(start, end, target);
+}
+#else
+template<typename T> EIGEN_DEVICE_FUNC T* smart_move(T* start, T* end, T* target)
+{
+ return std::copy(start, end, target);
+}
+#endif
/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc) ***
@@ -542,7 +584,7 @@ template<typename T> struct smart_memmove_helper<T,false> {
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
-#ifndef EIGEN_ALLOCA
+#if ! defined EIGEN_ALLOCA && ! defined EIGEN_GPU_COMPILE_PHASE
#if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif EIGEN_COMP_MSVC
@@ -550,6 +592,15 @@ template<typename T> struct smart_memmove_helper<T,false> {
#endif
#endif
+// With clang -Oz -mthumb, alloca changes the stack pointer in a way that is
+// not allowed in Thumb2. -DEIGEN_STACK_ALLOCATION_LIMIT=0 doesn't work because
+// the compiler still emits bad code because stack allocation checks use "<=".
+// TODO: Eliminate after https://bugs.llvm.org/show_bug.cgi?id=23772
+// is fixed.
+#if defined(__clang__) && defined(__thumb__)
+ #undef EIGEN_ALLOCA
+#endif
+
// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
// at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions.
template<typename T> class aligned_stack_memory_handler : noncopyable
@@ -561,12 +612,14 @@ template<typename T> class aligned_stack_memory_handler : noncopyable
* In this case, the buffer elements will also be destructed when this handler is destructed.
* Finally, if \a dealloc is true, then the pointer \a ptr is freed.
**/
+ EIGEN_DEVICE_FUNC
aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc)
: m_ptr(ptr), m_size(size), m_deallocate(dealloc)
{
if(NumTraits<T>::RequireInitialization && m_ptr)
Eigen::internal::construct_elements_of_array(m_ptr, size);
}
+ EIGEN_DEVICE_FUNC
~aligned_stack_memory_handler()
{
if(NumTraits<T>::RequireInitialization && m_ptr)
@@ -580,6 +633,60 @@ template<typename T> class aligned_stack_memory_handler : noncopyable
bool m_deallocate;
};
+#ifdef EIGEN_ALLOCA
+
+template<typename Xpr, int NbEvaluations,
+ bool MapExternalBuffer = nested_eval<Xpr,NbEvaluations>::Evaluate && Xpr::MaxSizeAtCompileTime==Dynamic
+ >
+struct local_nested_eval_wrapper
+{
+ static const bool NeedExternalBuffer = false;
+ typedef typename Xpr::Scalar Scalar;
+ typedef typename nested_eval<Xpr,NbEvaluations>::type ObjectType;
+ ObjectType object;
+
+ EIGEN_DEVICE_FUNC
+ local_nested_eval_wrapper(const Xpr& xpr, Scalar* ptr) : object(xpr)
+ {
+ EIGEN_UNUSED_VARIABLE(ptr);
+ eigen_internal_assert(ptr==0);
+ }
+};
+
+template<typename Xpr, int NbEvaluations>
+struct local_nested_eval_wrapper<Xpr,NbEvaluations,true>
+{
+ static const bool NeedExternalBuffer = true;
+ typedef typename Xpr::Scalar Scalar;
+ typedef typename plain_object_eval<Xpr>::type PlainObject;
+ typedef Map<PlainObject,EIGEN_DEFAULT_ALIGN_BYTES> ObjectType;
+ ObjectType object;
+
+ EIGEN_DEVICE_FUNC
+ local_nested_eval_wrapper(const Xpr& xpr, Scalar* ptr)
+ : object(ptr==0 ? reinterpret_cast<Scalar*>(Eigen::internal::aligned_malloc(sizeof(Scalar)*xpr.size())) : ptr, xpr.rows(), xpr.cols()),
+ m_deallocate(ptr==0)
+ {
+ if(NumTraits<Scalar>::RequireInitialization && object.data())
+ Eigen::internal::construct_elements_of_array(object.data(), object.size());
+ object = xpr;
+ }
+
+ EIGEN_DEVICE_FUNC
+ ~local_nested_eval_wrapper()
+ {
+ if(NumTraits<Scalar>::RequireInitialization && object.data())
+ Eigen::internal::destruct_elements_of_array(object.data(), object.size());
+ if(m_deallocate)
+ Eigen::internal::aligned_free(object.data());
+ }
+
+private:
+ bool m_deallocate;
+};
+
+#endif // EIGEN_ALLOCA
+
template<typename T> class scoped_array : noncopyable
{
T* m_ptr;
@@ -607,9 +714,11 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
} // end namespace internal
/** \internal
- * Declares, allocates and construct an aligned buffer named NAME of SIZE elements of type TYPE on the stack
- * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
- * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
+ *
+ * The macro ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) declares, allocates,
+ * and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
+ * if the size in bytes is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
+ * (currently, this is Linux, OSX and Visual Studio only). Otherwise the memory is allocated on the heap.
* The allocated buffer is automatically deleted when exiting the scope of this declaration.
* If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
* Here is an example:
@@ -620,6 +729,14 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
* }
* \endcode
* The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
+ *
+ * The macro ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) is analogous to
+ * \code
+ *   typename internal::nested_eval<XPR_T,N>::type NAME(XPR);
+ * \endcode
+ * with the advantage of using aligned stack allocation even if the maximal size of XPR at compile time is unknown.
+ * This is accomplished through alloca if the latter is supported and if the required number of bytes
+ * is below EIGEN_STACK_ALLOCATION_LIMIT.
*/
#ifdef EIGEN_ALLOCA
@@ -639,6 +756,13 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
: Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
+
+ #define ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) \
+ Eigen::internal::local_nested_eval_wrapper<XPR_T,N> EIGEN_CAT(NAME,_wrapper)(XPR, reinterpret_cast<typename XPR_T::Scalar*>( \
+ ( (Eigen::internal::local_nested_eval_wrapper<XPR_T,N>::NeedExternalBuffer) && ((sizeof(typename XPR_T::Scalar)*XPR.size())<=EIGEN_STACK_ALLOCATION_LIMIT) ) \
+ ? EIGEN_ALIGNED_ALLOCA( sizeof(typename XPR_T::Scalar)*XPR.size() ) : 0 ) ) ; \
+ typename Eigen::internal::local_nested_eval_wrapper<XPR_T,N>::ObjectType NAME(EIGEN_CAT(NAME,_wrapper).object)
+
#else
#define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
@@ -646,6 +770,9 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
+
+#define ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) typename Eigen::internal::nested_eval<XPR_T,N>::type NAME(XPR)
+
#endif
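
A hedged usage sketch of the first macro (hypothetical function, written as if inside Eigen's internals where the macro is visible): passing 0 as BUFFER lets the macro allocate, on the stack when n*sizeof(float) is below EIGEN_STACK_ALLOCATION_LIMIT and on the heap otherwise, with cleanup at the end of the scope either way:

    void scale_into_tmp(const float* src, int n, float factor) {
      ei_declare_aligned_stack_constructed_variable(float, tmp, n, 0);
      for (int i = 0; i < n; ++i)
        tmp[i] = factor * src[i];
      // ... use tmp; the storage is released when the scope ends ...
    }
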
@@ -653,32 +780,56 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] ***
*****************************************************************************/
-#if EIGEN_MAX_ALIGN_BYTES!=0
+#if EIGEN_HAS_CXX17_OVERALIGN
+
+// C++17 -> no need to bother about alignment anymore :)
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size)
+
+#else
+
+// HIP does not support new/delete on device.
+#if EIGEN_MAX_ALIGN_BYTES!=0 && !defined(EIGEN_HIP_DEVICE_COMPILE)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ EIGEN_DEVICE_FUNC \
void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \
EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
EIGEN_CATCH (...) { return 0; } \
}
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+ EIGEN_DEVICE_FUNC \
void *operator new(std::size_t size) { \
return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
} \
+ EIGEN_DEVICE_FUNC \
void *operator new[](std::size_t size) { \
return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
} \
+ EIGEN_DEVICE_FUNC \
void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ EIGEN_DEVICE_FUNC \
void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ EIGEN_DEVICE_FUNC \
void operator delete(void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ EIGEN_DEVICE_FUNC \
void operator delete[](void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
/* in-place new and delete. since (at least afaik) there is no actual */ \
/* memory allocated we can safely let the default implementation handle */ \
/* this particular case. */ \
+ EIGEN_DEVICE_FUNC \
static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \
+ EIGEN_DEVICE_FUNC \
static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \
+ EIGEN_DEVICE_FUNC \
void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \
+ EIGEN_DEVICE_FUNC \
void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \
/* nothrow-new (returns zero instead of std::bad_alloc) */ \
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ EIGEN_DEVICE_FUNC \
void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \
Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
} \
@@ -688,8 +839,14 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
#endif
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0)))
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool( \
+ ((Size)!=Eigen::Dynamic) && \
+ (((EIGEN_MAX_ALIGN_BYTES>=16) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES )==0)) || \
+ ((EIGEN_MAX_ALIGN_BYTES>=32) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/2)==0)) || \
+ ((EIGEN_MAX_ALIGN_BYTES>=64) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/4)==0)) )))
+
+#endif
/****************************************************************************/
@@ -703,7 +860,7 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
* - 32 bytes alignment if AVX is enabled.
* - 64 bytes alignment if AVX512 is enabled.
*
-* This can be controled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
+* This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
* \link TopicPreprocessorDirectivesPerformance there \endlink.
*
* Example:
@@ -744,6 +901,15 @@ public:
~aligned_allocator() {}
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(7,0)
+  // In gcc std::allocator::max_size() is bugged, making gcc trigger a warning:
+ // eigen/Eigen/src/Core/util/Memory.h:189:12: warning: argument 1 value '18446744073709551612' exceeds maximum object size 9223372036854775807
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87544
+ size_type max_size() const {
+ return (std::numeric_limits<std::ptrdiff_t>::max)()/sizeof(T);
+ }
+ #endif
+
pointer allocate(size_type num, const void* /*hint*/ = 0)
{
internal::check_size_for_overflow<T>(num);
@@ -906,20 +1072,32 @@ inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
{
if(max_std_funcs>=4)
queryCacheSizes_intel_direct(l1,l2,l3);
- else
+ else if(max_std_funcs>=2)
queryCacheSizes_intel_codes(l1,l2,l3);
+ else
+ l1 = l2 = l3 = 0;
}
inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
{
int abcd[4];
abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
- EIGEN_CPUID(abcd,0x80000005,0);
- l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
- abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
- EIGEN_CPUID(abcd,0x80000006,0);
- l2 = (abcd[2] >> 16) * 1024; // C[31;16] = l2 cache size in KB
- l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31;18] = l3 cache size in 512KB
+
+ // First query the max supported function.
+ EIGEN_CPUID(abcd,0x80000000,0);
+ if(static_cast<numext::uint32_t>(abcd[0]) >= static_cast<numext::uint32_t>(0x80000006))
+ {
+ EIGEN_CPUID(abcd,0x80000005,0);
+ l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x80000006,0);
+ l2 = (abcd[2] >> 16) * 1024; // C[31;16] = l2 cache size in KB
+ l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31;18] = l3 cache size in 512KB
+ }
+ else
+ {
+ l1 = l2 = l3 = 0;
+ }
}
#endif
@@ -935,7 +1113,7 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3)
// identify the CPU vendor
EIGEN_CPUID(abcd,0x0,0);
- int max_std_funcs = abcd[1];
+ int max_std_funcs = abcd[0];
if(cpuid_is_vendor(abcd,GenuineIntel))
queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/Meta.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/Meta.h
index 0fa818008..81ae2a32d 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/Meta.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/Meta.h
@@ -11,13 +11,54 @@
#ifndef EIGEN_META_H
#define EIGEN_META_H
-#if defined(EIGEN_CUDA_ARCH)
-#include <cfloat>
-#include <math_constants.h>
+#if defined(EIGEN_GPU_COMPILE_PHASE)
+
+ #include <cfloat>
+
+ #if defined(EIGEN_CUDA_ARCH)
+ #include <math_constants.h>
+ #endif
+
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
+ #include "Eigen/src/Core/arch/HIP/hcc/math_constants.h"
+ #endif
+
#endif
-#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L
+// Recent versions of ICC require <cstdint> for pointer types below.
+#define EIGEN_ICC_NEEDS_CSTDINT (EIGEN_COMP_ICC>=1600 && EIGEN_COMP_CXXVER >= 11)
+
+// Define portable (u)int{32,64} types
+#if EIGEN_HAS_CXX11 || EIGEN_ICC_NEEDS_CSTDINT
#include <cstdint>
+namespace Eigen {
+namespace numext {
+typedef std::uint8_t uint8_t;
+typedef std::int8_t int8_t;
+typedef std::uint16_t uint16_t;
+typedef std::int16_t int16_t;
+typedef std::uint32_t uint32_t;
+typedef std::int32_t int32_t;
+typedef std::uint64_t uint64_t;
+typedef std::int64_t int64_t;
+}
+}
+#else
+// Without c++11, all compilers able to compile Eigen also
+// provide the C99 stdint.h header file.
+#include <stdint.h>
+namespace Eigen {
+namespace numext {
+typedef ::uint8_t uint8_t;
+typedef ::int8_t int8_t;
+typedef ::uint16_t uint16_t;
+typedef ::int16_t int16_t;
+typedef ::uint32_t uint32_t;
+typedef ::int32_t int32_t;
+typedef ::uint64_t uint64_t;
+typedef ::int64_t int64_t;
+}
+}
#endif
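
Usage sketch (assuming an Eigen header that pulls in Meta.h is on the include path): downstream code can use the numext aliases without caring whether they came from <cstdint> or <stdint.h>:

    #include <Eigen/Core>

    // A toy 32-bit mixing function written against the portable aliases.
    Eigen::numext::uint32_t mix32(Eigen::numext::uint32_t x) {
      x ^= x >> 16;
      x *= Eigen::numext::uint32_t(2654435769u);  // golden-ratio constant, 0x9E3779B9
      x ^= x >> 15;
      return x;
    }
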
namespace Eigen {
@@ -43,26 +84,33 @@ namespace internal {
// Only recent versions of ICC complain about using ptrdiff_t to hold pointers,
// and older versions do not provide *intptr_t types.
-#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L
+#if EIGEN_ICC_NEEDS_CSTDINT
typedef std::intptr_t IntPtr;
typedef std::uintptr_t UIntPtr;
#else
typedef std::ptrdiff_t IntPtr;
typedef std::size_t UIntPtr;
#endif
+#undef EIGEN_ICC_NEEDS_CSTDINT
struct true_type { enum { value = 1 }; };
struct false_type { enum { value = 0 }; };
+template<bool Condition>
+struct bool_constant;
+
+template<>
+struct bool_constant<true> : true_type {};
+
+template<>
+struct bool_constant<false> : false_type {};
+
template<bool Condition, typename Then, typename Else>
struct conditional { typedef Then type; };
template<typename Then, typename Else>
struct conditional <false, Then, Else> { typedef Else type; };
-template<typename T, typename U> struct is_same { enum { value = 0 }; };
-template<typename T> struct is_same<T,T> { enum { value = 1 }; };
-
template<typename T> struct remove_reference { typedef T type; };
template<typename T> struct remove_reference<T&> { typedef T type; };
@@ -97,7 +145,15 @@ template<> struct is_arithmetic<unsigned int> { enum { value = true }; };
template<> struct is_arithmetic<signed long> { enum { value = true }; };
template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
+template<typename T, typename U> struct is_same { enum { value = 0 }; };
+template<typename T> struct is_same<T,T> { enum { value = 1 }; };
+
+template< class T >
+struct is_void : is_same<void, typename remove_const<T>::type> {};
+
#if EIGEN_HAS_CXX11
+template<> struct is_arithmetic<signed long long> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned long long> { enum { value = true }; };
using std::is_integral;
#else
template<typename T> struct is_integral { enum { value = false }; };
@@ -111,8 +167,43 @@ template<> struct is_integral<signed int> { enum { value = true }; }
template<> struct is_integral<unsigned int> { enum { value = true }; };
template<> struct is_integral<signed long> { enum { value = true }; };
template<> struct is_integral<unsigned long> { enum { value = true }; };
+#if EIGEN_COMP_MSVC
+template<> struct is_integral<signed __int64> { enum { value = true }; };
+template<> struct is_integral<unsigned __int64> { enum { value = true }; };
+#endif
#endif
+#if EIGEN_HAS_CXX11
+using std::make_unsigned;
+#else
+// TODO: Possibly improve this implementation of make_unsigned.
+// It is currently used only by
+// template<typename Scalar> struct random_default_impl<Scalar, false, true>.
+template<typename> struct make_unsigned;
+template<> struct make_unsigned<char> { typedef unsigned char type; };
+template<> struct make_unsigned<signed char> { typedef unsigned char type; };
+template<> struct make_unsigned<unsigned char> { typedef unsigned char type; };
+template<> struct make_unsigned<signed short> { typedef unsigned short type; };
+template<> struct make_unsigned<unsigned short> { typedef unsigned short type; };
+template<> struct make_unsigned<signed int> { typedef unsigned int type; };
+template<> struct make_unsigned<unsigned int> { typedef unsigned int type; };
+template<> struct make_unsigned<signed long> { typedef unsigned long type; };
+template<> struct make_unsigned<unsigned long> { typedef unsigned long type; };
+#if EIGEN_COMP_MSVC
+template<> struct make_unsigned<signed __int64> { typedef unsigned __int64 type; };
+template<> struct make_unsigned<unsigned __int64> { typedef unsigned __int64 type; };
+#endif
+
+// Some platforms define int64_t as `long long` even for C++03, where
+// `long long` is not guaranteed by the standard. In this case we are missing
+// the definition for make_unsigned. If we just define it, we run into issues
+// where `long long` doesn't exist in some compilers for C++03. We therefore add
+// the specialization for these platforms only.
+#if EIGEN_OS_MAC || EIGEN_COMP_MINGW
+template<> struct make_unsigned<unsigned long long> { typedef unsigned long long type; };
+template<> struct make_unsigned<long long> { typedef unsigned long long type; };
+#endif
+#endif
template <typename T> struct add_const { typedef const T type; };
template <typename T> struct add_const<T&> { typedef T& type; };
@@ -126,6 +217,11 @@ template<typename T> struct add_const_on_value_type<T*> { typedef T const
template<typename T> struct add_const_on_value_type<T* const> { typedef T const* const type; };
template<typename T> struct add_const_on_value_type<T const* const> { typedef T const* const type; };
+#if EIGEN_HAS_CXX11
+
+using std::is_convertible;
+
+#else
template<typename From, typename To>
struct is_convertible_impl
@@ -139,16 +235,19 @@ private:
struct yes {int a[1];};
struct no {int a[2];};
- static yes test(const To&, int);
+ template<typename T>
+ static yes test(T, int);
+
+ template<typename T>
static no test(any_conversion, ...);
public:
- static From ms_from;
+ static typename internal::remove_reference<From>::type* ms_from;
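+ // ms_from is only named inside an unevaluated sizeof() expression below, so it
+ // never needs a definition; dereferencing it is purely a type-level probe.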
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning ( disable : 2259 )
#endif
- enum { value = sizeof(test(ms_from, 0))==sizeof(yes) };
+ enum { value = sizeof(test<To>(*ms_from, 0))==sizeof(yes) };
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
@@ -157,10 +256,17 @@ public:
template<typename From, typename To>
struct is_convertible
{
- enum { value = is_convertible_impl<typename remove_all<From>::type,
- typename remove_all<To >::type>::value };
+ enum { value = is_convertible_impl<From,To>::value };
};
+template<typename T>
+struct is_convertible<T,T&> { enum { value = false }; };
+
+template<typename T>
+struct is_convertible<const T,const T&> { enum { value = true }; };
+
+#endif
+
/** \internal Allows to enable/disable an overload
* according to a compile time condition.
*/
@@ -169,7 +275,7 @@ template<bool Condition, typename T=void> struct enable_if;
template<typename T> struct enable_if<true,T>
{ typedef T type; };
-#if defined(EIGEN_CUDA_ARCH)
+#if defined(EIGEN_GPU_COMPILE_PHASE) && !EIGEN_HAS_CXX11
#if !defined(__FLT_EPSILON__)
#define __FLT_EPSILON__ FLT_EPSILON
#define __DBL_EPSILON__ DBL_EPSILON
@@ -180,7 +286,7 @@ namespace device {
template<typename T> struct numeric_limits
{
EIGEN_DEVICE_FUNC
- static T epsilon() { return 0; }
+ static EIGEN_CONSTEXPR T epsilon() { return 0; }
static T (max)() { assert(false && "Highest not supported for this type"); }
static T (min)() { assert(false && "Lowest not supported for this type"); }
static T infinity() { assert(false && "Infinity not supported for this type"); }
@@ -188,91 +294,130 @@ template<typename T> struct numeric_limits
};
template<> struct numeric_limits<float>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static float epsilon() { return __FLT_EPSILON__; }
EIGEN_DEVICE_FUNC
- static float (max)() { return CUDART_MAX_NORMAL_F; }
- EIGEN_DEVICE_FUNC
+ static float (max)() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_MAX_NORMAL_F;
+ #else
+ return HIPRT_MAX_NORMAL_F;
+ #endif
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static float (min)() { return FLT_MIN; }
EIGEN_DEVICE_FUNC
- static float infinity() { return CUDART_INF_F; }
+ static float infinity() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_INF_F;
+ #else
+ return HIPRT_INF_F;
+ #endif
+ }
EIGEN_DEVICE_FUNC
- static float quiet_NaN() { return CUDART_NAN_F; }
+ static float quiet_NaN() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_NAN_F;
+ #else
+ return HIPRT_NAN_F;
+ #endif
+ }
};
template<> struct numeric_limits<double>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static double epsilon() { return __DBL_EPSILON__; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static double (max)() { return DBL_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static double (min)() { return DBL_MIN; }
EIGEN_DEVICE_FUNC
- static double infinity() { return CUDART_INF; }
+ static double infinity() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_INF;
+ #else
+ return HIPRT_INF;
+ #endif
+ }
EIGEN_DEVICE_FUNC
- static double quiet_NaN() { return CUDART_NAN; }
+ static double quiet_NaN() {
+ #if defined(EIGEN_CUDA_ARCH)
+ return CUDART_NAN;
+ #else
+ return HIPRT_NAN;
+ #endif
+ }
};
template<> struct numeric_limits<int>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static int epsilon() { return 0; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static int (max)() { return INT_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static int (min)() { return INT_MIN; }
};
template<> struct numeric_limits<unsigned int>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned int epsilon() { return 0; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned int (max)() { return UINT_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned int (min)() { return 0; }
};
template<> struct numeric_limits<long>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static long epsilon() { return 0; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static long (max)() { return LONG_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static long (min)() { return LONG_MIN; }
};
template<> struct numeric_limits<unsigned long>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned long epsilon() { return 0; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned long (max)() { return ULONG_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned long (min)() { return 0; }
};
template<> struct numeric_limits<long long>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static long long epsilon() { return 0; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static long long (max)() { return LLONG_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static long long (min)() { return LLONG_MIN; }
};
template<> struct numeric_limits<unsigned long long>
{
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned long long epsilon() { return 0; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned long long (max)() { return ULLONG_MAX; }
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
static unsigned long long (min)() { return 0; }
};
+template<> struct numeric_limits<bool>
+{
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static bool epsilon() { return false; }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static bool (max)() { return true; }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ static bool (min)() { return false; }
+};
}
-#endif
+#endif // defined(EIGEN_GPU_COMPILE_PHASE) && !EIGEN_HAS_CXX11
/** \internal
- * A base class do disable default copy ctor and copy assignement operator.
+ * A base class to disable default copy ctor and copy assignment operator.
*/
class noncopyable
{
@@ -331,19 +476,35 @@ template<typename T, std::size_t N> struct array_size<std::array<T,N> > {
*
*/
template<typename T>
-Index size(const T& x) { return x.size(); }
+EIGEN_CONSTEXPR Index size(const T& x) { return x.size(); }
template<typename T,std::size_t N>
-Index size(const T (&) [N]) { return N; }
+EIGEN_CONSTEXPR Index size(const T (&) [N]) { return N; }
/** \internal
- * Convenient struct to get the result type of a unary or binary functor.
- *
- * It supports both the current STL mechanism (using the result_type member) as well as
- * upcoming next STL generation (using a templated result member).
- * If none of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack.
+ * Convenient struct to get the result type of a nullary, unary, binary, or
+ * ternary functor.
+ *
+ * Pre C++11:
+ * Supports both a Func::result_type member and templated
+ * Func::result<Func(ArgTypes...)>::type member.
+ *
+ * If none of these members is provided, then the type of the first
+ * argument is returned.
+ *
+ * Post C++11:
+ * This uses std::invoke_result when available, falling back to std::result_of.
+ * Note that the `type` member removes const and converts references/pointers
+ * to their corresponding value type.
*/
-#if EIGEN_HAS_STD_RESULT_OF
+#if EIGEN_HAS_STD_INVOKE_RESULT
+template<typename T> struct result_of;
+
+template<typename F, typename... ArgTypes>
+struct result_of<F(ArgTypes...)> {
+ typedef typename std::invoke_result<F, ArgTypes...>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
+#elif EIGEN_HAS_STD_RESULT_OF
template<typename T> struct result_of {
typedef typename std::result_of<T>::type type1;
typedef typename remove_all<type1>::type type;
@@ -355,6 +516,28 @@ struct has_none {int a[1];};
struct has_std_result_type {int a[2];};
struct has_tr1_result {int a[3];};
+template<typename Func, int SizeOf>
+struct nullary_result_of_select {};
+
+template<typename Func>
+struct nullary_result_of_select<Func, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};
+
+template<typename Func>
+struct nullary_result_of_select<Func, sizeof(has_tr1_result)> {typedef typename Func::template result<Func()>::type type;};
+
+template<typename Func>
+struct result_of<Func()> {
+ template<typename T>
+ static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+ template<typename T>
+ static has_tr1_result testFunctor(T const *, typename T::template result<T()>::type const * = 0);
+ static has_none testFunctor(...);
+
+ // note that the following indirection is needed for gcc-3.3
+ enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+ typedef typename nullary_result_of_select<Func, FunctorType>::type type;
+};
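+// For instance, a functor declaring `typedef double result_type;` is matched by
+// the has_std_result_type probe, while one declaring a tr1-style
+// `template<typename T> struct result` member is matched by has_tr1_result.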
+
template<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>
struct unary_result_of_select {typedef typename internal::remove_all<ArgType>::type type;};
@@ -424,6 +607,45 @@ struct result_of<Func(ArgType0,ArgType1,ArgType2)> {
enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
typedef typename ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, FunctorType>::type type;
};
+
+#endif
+
+#if EIGEN_HAS_STD_INVOKE_RESULT
+template<typename F, typename... ArgTypes>
+struct invoke_result {
+ typedef typename std::invoke_result<F, ArgTypes...>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
+#elif EIGEN_HAS_CXX11
+template<typename F, typename... ArgTypes>
+struct invoke_result {
+ typedef typename result_of<F(ArgTypes...)>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
+#else
+template<typename F, typename ArgType0 = void, typename ArgType1 = void, typename ArgType2 = void>
+struct invoke_result {
+ typedef typename result_of<F(ArgType0, ArgType1, ArgType2)>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
+
+template<typename F>
+struct invoke_result<F, void, void, void> {
+ typedef typename result_of<F()>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
+
+template<typename F, typename ArgType0>
+struct invoke_result<F, ArgType0, void, void> {
+ typedef typename result_of<F(ArgType0)>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
+
+template<typename F, typename ArgType0, typename ArgType1>
+struct invoke_result<F, ArgType0, ArgType1, void> {
+ typedef typename result_of<F(ArgType0, ArgType1)>::type type1;
+ typedef typename remove_all<type1>::type type;
+};
#endif
struct meta_yes { char a[1]; };
@@ -493,20 +715,25 @@ class meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ?
/** \internal Computes the least common multiple of two positive integer A and B
- * at compile-time. It implements a naive algorithm testing all multiples of A.
- * It thus works better if A>=B.
+ * at compile-time.
*/
-template<int A, int B, int K=1, bool Done = ((A*K)%B)==0>
+template<int A, int B, int K=1, bool Done = ((A*K)%B)==0, bool Big=(A>=B)>
struct meta_least_common_multiple
{
enum { ret = meta_least_common_multiple<A,B,K+1>::ret };
};
+template<int A, int B, int K, bool Done>
+struct meta_least_common_multiple<A,B,K,Done,false>
+{
+ enum { ret = meta_least_common_multiple<B,A,K>::ret };
+};
template<int A, int B, int K>
-struct meta_least_common_multiple<A,B,K,true>
+struct meta_least_common_multiple<A,B,K,true,true>
{
enum { ret = A*K };
};
+
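+// For instance, meta_least_common_multiple<6,4>::ret is 12: since A>=B the primary
+// template tries K=1,2,... until (A*K)%B==0 (here K==2), while the new Big==false
+// specialization handles A<B by swapping the two arguments first.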
/** \internal determines whether the product of two numeric types is allowed and what the return type is */
template<typename T, typename U> struct scalar_product_traits
{
@@ -519,17 +746,27 @@ template<typename T, typename U> struct scalar_product_traits
// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
// };
+/** \internal Obtains a POD type suitable to use as storage for an object of a size
+ * of at most Len bytes, aligned as specified by \c Align.
+ */
+template<unsigned Len, unsigned Align>
+struct aligned_storage {
+ struct type {
+ EIGEN_ALIGN_TO_BOUNDARY(Align) unsigned char data[Len];
+ };
+};
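+// e.g. internal::aligned_storage<16,16>::type provides a 16-byte buffer aligned
+// to a 16-byte boundary, suitable for placement-new of an object of that size.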
+
} // end namespace internal
namespace numext {
-
-#if defined(EIGEN_CUDA_ARCH)
+
+#if defined(EIGEN_GPU_COMPILE_PHASE)
template<typename T> EIGEN_DEVICE_FUNC void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; }
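+// std::swap is generally not usable from GPU device code, hence this hand-rolled swap.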
#else
template<typename T> EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); }
#endif
-#if defined(EIGEN_CUDA_ARCH)
+#if defined(EIGEN_GPU_COMPILE_PHASE) && !EIGEN_HAS_CXX11
using internal::device::numeric_limits;
#else
using std::numeric_limits;
@@ -538,11 +775,36 @@ using std::numeric_limits;
// Integer division with rounding up.
// T is assumed to be an integer type with a>=0, and b>0
template<typename T>
+EIGEN_DEVICE_FUNC
T div_ceil(const T &a, const T &b)
{
return (a+b-1) / b;
}
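+// e.g. div_ceil(7,3) == 3 while div_ceil(6,3) == 2.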
+// The aim of the following functions is to bypass -Wfloat-equal warnings
+// when we really want a strict equality comparison on floating points.
+template<typename X, typename Y> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool equal_strict(const X& x,const Y& y) { return x == y; }
+
+#if !defined(EIGEN_GPU_COMPILE_PHASE) || (!defined(EIGEN_CUDA_ARCH) && defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC))
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool equal_strict(const float& x,const float& y) { return std::equal_to<float>()(x,y); }
+
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool equal_strict(const double& x,const double& y) { return std::equal_to<double>()(x,y); }
+#endif
+
+template<typename X, typename Y> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool not_equal_strict(const X& x,const Y& y) { return x != y; }
+
+#if !defined(EIGEN_GPU_COMPILE_PHASE) || (!defined(EIGEN_CUDA_ARCH) && defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC))
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool not_equal_strict(const float& x,const float& y) { return std::not_equal_to<float>()(x,y); }
+
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+bool not_equal_strict(const double& x,const double& y) { return std::not_equal_to<double>()(x,y); }
+#endif
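+// Routing the comparison through std::equal_to/std::not_equal_to keeps the raw
+// floating-point ==/!= inside the standard functors, so a call such as
+// numext::equal_strict(1.0f, 1.0f) compiles cleanly under -Wfloat-equal.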
+
} // end namespace numext
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/ReenableStupidWarnings.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/ReenableStupidWarnings.h
index 86b60f52f..1ce6fd1b0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/ReenableStupidWarnings.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/ReenableStupidWarnings.h
@@ -1,4 +1,8 @@
-#ifdef EIGEN_WARNINGS_DISABLED
+#ifdef EIGEN_WARNINGS_DISABLED_2
+// "DisableStupidWarnings.h" was included twice recursively: Do not reenable warnings yet!
+# undef EIGEN_WARNINGS_DISABLED_2
+
+#elif defined(EIGEN_WARNINGS_DISABLED)
#undef EIGEN_WARNINGS_DISABLED
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
@@ -8,7 +12,7 @@
#pragma warning pop
#elif defined __clang__
#pragma clang diagnostic pop
- #elif defined __GNUC__ && __GNUC__>=6
+ #elif defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#pragma GCC diagnostic pop
#endif
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/ReshapedHelper.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/ReshapedHelper.h
new file mode 100644
index 000000000..412432132
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/ReshapedHelper.h
@@ -0,0 +1,51 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_RESHAPED_HELPER_H
+#define EIGEN_RESHAPED_HELPER_H
+
+namespace Eigen {
+
+enum AutoSize_t { AutoSize };
+const int AutoOrder = 2;
+
+namespace internal {
+
+template<typename SizeType,typename OtherSize, int TotalSize>
+struct get_compiletime_reshape_size {
+ enum { value = get_fixed_value<SizeType>::value };
+};
+
+template<typename SizeType>
+Index get_runtime_reshape_size(SizeType size, Index /*other*/, Index /*total*/) {
+ return internal::get_runtime_value(size);
+}
+
+template<typename OtherSize, int TotalSize>
+struct get_compiletime_reshape_size<AutoSize_t,OtherSize,TotalSize> {
+ enum {
+ other_size = get_fixed_value<OtherSize>::value,
+ value = (TotalSize==Dynamic || other_size==Dynamic) ? Dynamic : TotalSize / other_size };
+};
+
+inline Index get_runtime_reshape_size(AutoSize_t /*size*/, Index other, Index total) {
+ return total/other;
+}
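+// e.g. reshaping 12 coefficients with sizes (4, AutoSize): this overload returns
+// total/other = 12/4 = 3 for the automatically deduced dimension.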
+
+template<int Flags, int Order>
+struct get_compiletime_reshape_order {
+ enum { value = Order == AutoOrder ? Flags & RowMajorBit : Order };
+};
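+// i.e. with Order==AutoOrder the reshaped expression keeps the storage order of its
+// argument (Flags & RowMajorBit); otherwise the explicitly requested Order is used.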
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_RESHAPED_HELPER_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/StaticAssert.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/StaticAssert.h
index cb1678900..c45de5901 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/StaticAssert.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/StaticAssert.h
@@ -24,9 +24,10 @@
*
*/
+#ifndef EIGEN_STATIC_ASSERT
#ifndef EIGEN_NO_STATIC_ASSERT
- #if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600))
+ #if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (EIGEN_COMP_CXXVER >= 11) || (EIGEN_COMP_MSVC >= 1600))
// if native static_assert is enabled, let's use it
#define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
@@ -101,7 +102,11 @@
THIS_TYPE_IS_NOT_SUPPORTED=1,
STORAGE_KIND_MUST_MATCH=1,
STORAGE_INDEX_MUST_MATCH=1,
- CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY=1
+ CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY=1,
+ SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY=1,
+ INVALID_TEMPLATE_PARAMETER=1,
+ GPU_TENSOR_CONTRACTION_DOES_NOT_SUPPORT_OUTPUT_KERNELS=1,
+ THE_ARRAY_SIZE_SHOULD_EQUAL_WITH_PACKET_SIZE=1
};
};
@@ -131,7 +136,7 @@
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
#endif // EIGEN_NO_STATIC_ASSERT
-
+#endif // EIGEN_STATIC_ASSERT
// static assertion failing if the type \a TYPE is not a vector type
#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
@@ -180,7 +185,7 @@
)
#define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
- EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+ EIGEN_STATIC_ASSERT(!Eigen::NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
@@ -190,8 +195,8 @@
YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
- EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
- (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
+ EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Eigen::Dynamic) && \
+ (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Eigen::Dynamic), \
THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/SymbolicIndex.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/SymbolicIndex.h
index bb6349eb9..354dd9add 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/SymbolicIndex.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/SymbolicIndex.h
@@ -12,7 +12,7 @@
namespace Eigen {
-/** \namespace Eigen::Symbolic
+/** \namespace Eigen::symbolic
* \ingroup Core_Module
*
* This namespace defines a set of classes and functions to build and evaluate symbolic expressions of scalar type Index.
@@ -20,9 +20,9 @@ namespace Eigen {
*
* \code
* // First step, defines symbols:
- * struct x_tag {}; static const Symbolic::SymbolExpr<x_tag> x;
- * struct y_tag {}; static const Symbolic::SymbolExpr<y_tag> y;
- * struct z_tag {}; static const Symbolic::SymbolExpr<z_tag> z;
+ * struct x_tag {}; static const symbolic::SymbolExpr<x_tag> x;
+ * struct y_tag {}; static const symbolic::SymbolExpr<y_tag> y;
+ * struct z_tag {}; static const symbolic::SymbolExpr<z_tag> z;
*
* // Defines an expression:
* auto expr = (x+3)/y+z;
@@ -35,10 +35,10 @@ namespace Eigen {
* std::cout << expr98.eval(x=6) << "\n";
* \endcode
*
- * It is currently only used internally to define and minipulate the placeholders::last and placeholders::end symbols in Eigen::seq and Eigen::seqN.
+ * It is currently only used internally to define and manipulate the Eigen::last and Eigen::lastp1 symbols in Eigen::seq and Eigen::seqN.
*
*/
-namespace Symbolic {
+namespace symbolic {
template<typename Tag> class Symbol;
template<typename Arg0> class NegateExpr;
@@ -65,7 +65,7 @@ class ValueExpr<internal::FixedInt<N> > {
public:
ValueExpr() {}
template<typename T>
- Index eval_impl(const T&) const { return N; }
+ EIGEN_CONSTEXPR Index eval_impl(const T&) const { return N; }
};
@@ -187,17 +187,10 @@ public:
template<typename T>
struct is_symbolic {
- // BaseExpr has no conversion ctor, so we only have to check whether T can be staticaly cast to its base class BaseExpr<T>.
+ // BaseExpr has no conversion ctor, so we only have to check whether T can be statically cast to its base class BaseExpr<T>.
enum { value = internal::is_convertible<T,BaseExpr<T> >::value };
};
-// Specialization for functions, because is_convertible fails in this case.
-// Useful in c++98/11 mode when testing is_symbolic<decltype(fix<N>)>
-template<typename T>
-struct is_symbolic<T (*)()> {
- enum { value = false };
-};
-
/** Represents the actual value of a symbol identified by its tag
*
* It is the return type of SymbolValue::operator=, and most of the time this is only way it is used.
@@ -293,7 +286,7 @@ protected:
Arg1 m_arg1;
};
-} // end namespace Symbolic
+} // end namespace symbolic
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/Core/util/XprHelper.h b/examples/ThirdPartyLibs/Eigen/src/Core/util/XprHelper.h
index 10328be0d..71c32b8a1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Core/util/XprHelper.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Core/util/XprHelper.h
@@ -39,14 +39,22 @@ template<typename T> struct is_valid_index_type
{
enum { value =
#if EIGEN_HAS_TYPE_TRAITS
- internal::is_integral<T>::value || std::is_enum<T>::value
+ internal::is_integral<T>::value || std::is_enum<T>::value
+#elif EIGEN_COMP_MSVC
+ internal::is_integral<T>::value || __is_enum(T)
#else
- // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
- internal::is_convertible<T,Index>::value
+ // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
+ internal::is_convertible<T,Index>::value && !internal::is_same<T,float>::value && !is_same<T,double>::value
#endif
};
};
+// true unless both RowIndices and ColIndices are valid index types
+template<typename RowIndices, typename ColIndices>
+struct valid_indexed_view_overload {
+ enum { value = !(internal::is_valid_index_type<RowIndices>::value && internal::is_valid_index_type<ColIndices>::value) };
+};
+
// promote_scalar_arg is an helper used in operation between an expression and a scalar, like:
// expression * scalar
// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
@@ -102,6 +110,9 @@ class no_assignment_operator
{
private:
no_assignment_operator& operator=(const no_assignment_operator&);
+ protected:
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator)
};
/** \internal return the index type with the largest number of bits */
@@ -118,19 +129,21 @@ struct promote_index_type
template<typename T, int Value> class variable_if_dynamic
{
public:
- EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(variable_if_dynamic)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return T(Value); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ T value() { return T(Value); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ operator T() const { return T(Value); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void setValue(T v) const { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
};
template<typename T> class variable_if_dynamic<T, Dynamic>
{
T m_value;
- EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }
public:
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value) : m_value(value) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value = 0) EIGEN_NO_THROW : m_value(value) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return m_value; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
@@ -143,8 +156,10 @@ template<typename T, int Value> class variable_if_dynamicindex
public:
EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
- EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ T value() { return T(Value); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void setValue(T) {}
};
template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
@@ -169,16 +184,7 @@ template<typename T> struct functor_traits
template<typename T> struct packet_traits;
-template<typename T> struct unpacket_traits
-{
- typedef T type;
- typedef T half;
- enum
- {
- size = 1,
- alignment = 1
- };
-};
+template<typename T> struct unpacket_traits;
template<int Size, typename PacketType,
bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
@@ -397,7 +403,7 @@ template<typename T> struct plain_matrix_type_row_major
typedef Matrix<typename traits<T>::Scalar,
Rows,
Cols,
- (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+ (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
MaxRows,
MaxCols
> type;
@@ -414,7 +420,7 @@ struct ref_selector
T const&,
const T
>::type type;
-
+
typedef typename conditional<
bool(traits<T>::Flags & NestByRefBit),
T &,
@@ -452,7 +458,7 @@ template<typename T, int n, typename PlainObject = typename plain_object_eval<T>
{
enum {
ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
- CoeffReadCost = evaluator<T>::CoeffReadCost, // NOTE What if an evaluator evaluate itself into a tempory?
+ CoeffReadCost = evaluator<T>::CoeffReadCost, // NOTE What if an evaluator evaluates itself into a temporary?
// Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
// This situation is already taken care of by the EvalBeforeNestingBit flag, which is turned ON
// for all evaluators creating a temporary. This flag is then propagated by the parent evaluators.
@@ -593,14 +599,14 @@ template<typename ExpressionType, typename Scalar = typename ExpressionType::Sca
struct plain_row_type
{
typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
- ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
+ int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
- ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
+ int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
typedef typename conditional<
is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
MatrixRowType,
- ArrayRowType
+ ArrayRowType
>::type type;
};
@@ -615,7 +621,7 @@ struct plain_col_type
typedef typename conditional<
is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
MatrixColType,
- ArrayColType
+ ArrayColType
>::type type;
};
@@ -631,7 +637,7 @@ struct plain_diag_type
typedef typename conditional<
is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
MatrixDiagType,
- ArrayDiagType
+ ArrayDiagType
>::type type;
};
@@ -668,17 +674,32 @@ template<typename T> struct is_diagonal<DiagonalWrapper<T> >
template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
{ enum { ret = true }; };
+
+template<typename T> struct is_identity
+{ enum { value = false }; };
+
+template<typename T> struct is_identity<CwiseNullaryOp<internal::scalar_identity_op<typename T::Scalar>, T> >
+{ enum { value = true }; };
+
+
template<typename S1, typename S2> struct glue_shapes;
template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
template<typename T1, typename T2>
-bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)
+struct possibly_same_dense {
+ enum { value = has_direct_access<T1>::ret && has_direct_access<T2>::ret && is_same<typename T1::Scalar,typename T2::Scalar>::value };
+};
+
+template<typename T1, typename T2>
+EIGEN_DEVICE_FUNC
+bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<possibly_same_dense<T1,T2>::value>::type * = 0)
{
return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}
template<typename T1, typename T2>
-bool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)
+EIGEN_DEVICE_FUNC
+bool is_same_dense(const T1 &, const T2 &, typename enable_if<!possibly_same_dense<T1,T2>::value>::type * = 0)
{
return false;
}
@@ -732,7 +753,7 @@ std::string demangle_flags(int f)
if(f&DirectAccessBit) res += " | Direct";
if(f&NestByRefBit) res += " | NestByRef";
if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit";
-
+
return res;
}
#endif
@@ -829,7 +850,7 @@ struct ScalarBinaryOpTraits<void,void,BinaryOp>
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
+
} // end namespace Eigen
#endif // EIGEN_XPRHELPER_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index dc5fae06a..081e918f1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -214,7 +214,7 @@ template<typename _MatrixType> class ComplexEigenSolver
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexSchur.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexSchur.h
index 7f38919f7..fc71468f8 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -212,7 +212,7 @@ template<typename _MatrixType> class ComplexSchur
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
@@ -300,10 +300,13 @@ typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::compu
ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
ComplexScalar eival1 = (trace + disc) / RealScalar(2);
ComplexScalar eival2 = (trace - disc) / RealScalar(2);
-
- if(numext::norm1(eival1) > numext::norm1(eival2))
+ RealScalar eival1_norm = numext::norm1(eival1);
+ RealScalar eival2_norm = numext::norm1(eival2);
+ // A division by zero can only occur if eival1==eival2==0.
+ // In this case, det==0, and all we have to do is check that eival2_norm!=0
+ if(eival1_norm > eival2_norm)
eival2 = det / eival1;
- else
+ else if(eival2_norm!=RealScalar(0))
eival1 = det / eival2;
// choose the eigenvalue closest to the bottom entry of the diagonal
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/EigenSolver.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/EigenSolver.h
index f205b185d..572b29e4e 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/EigenSolver.h
@@ -110,7 +110,7 @@ template<typename _MatrixType> class EigenSolver
*
* \sa compute() for an example.
*/
- EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {}
+ EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
/** \brief Default constructor with memory preallocation
*
@@ -277,7 +277,7 @@ template<typename _MatrixType> class EigenSolver
template<typename InputType>
EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
- /** \returns NumericalIssue if the input contains INF or NaN values or overflow occured. Returns Success otherwise. */
+ /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
index 36a91dffc..87d789b3f 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -311,7 +311,6 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
// Aliases:
Map<VectorType> v(reinterpret_cast<Scalar*>(m_tmp.data()), size);
ComplexVectorType &cv = m_tmp;
- const MatrixType &mZ = m_realQZ.matrixZ();
const MatrixType &mS = m_realQZ.matrixS();
const MatrixType &mT = m_realQZ.matrixT();
@@ -351,7 +350,7 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
}
}
}
- m_eivec.col(i).real().noalias() = mZ.transpose() * v;
+ m_eivec.col(i).real().noalias() = m_realQZ.matrixZ().transpose() * v;
m_eivec.col(i).real().normalize();
m_eivec.col(i).imag().setConstant(0);
}
@@ -400,7 +399,7 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
/ (alpha*mT.coeffRef(j,j) - static_cast<Scalar>(beta*mS.coeffRef(j,j)));
}
}
- m_eivec.col(i+1).noalias() = (mZ.transpose() * cv);
+ m_eivec.col(i+1).noalias() = (m_realQZ.matrixZ().transpose() * cv);
m_eivec.col(i+1).normalize();
m_eivec.col(i) = m_eivec.col(i+1).conjugate();
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
index 5f6bb8289..d0f9091be 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
@@ -121,7 +121,7 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT
*
* \returns Reference to \c *this
*
- * Accoring to \p options, this function computes eigenvalues and (if requested)
+ * According to \p options, this function computes eigenvalues and (if requested)
* the eigenvectors of one of the following three generalized eigenproblems:
* - \c Ax_lBx: \f$ Ax = \lambda B x \f$
* - \c ABx_lx: \f$ ABx = \lambda x \f$
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/HessenbergDecomposition.h
index f647f69b0..1f2113934 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/HessenbergDecomposition.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -267,7 +267,7 @@ template<typename _MatrixType> class HessenbergDecomposition
private:
- typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;
+ typedef Matrix<Scalar, 1, Size, int(Options) | int(RowMajor), 1, MaxSize> VectorType;
typedef typename NumTraits<Scalar>::Real RealScalar;
static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);
@@ -315,7 +315,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
// A = A H'
matA.rightCols(remainingSize)
- .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
+ .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1), numext::conj(h), &temp.coeffRef(0));
}
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
index dbbd4806a..66e5a3dbb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
@@ -66,7 +66,6 @@ template<typename Derived>
inline typename MatrixBase<Derived>::EigenvaluesReturnType
MatrixBase<Derived>::eigenvalues() const
{
- typedef typename internal::traits<Derived>::Scalar Scalar;
return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());
}
@@ -88,7 +87,6 @@ template<typename MatrixType, unsigned int UpLo>
EIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
{
- typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;
PlainObject thisAsMatrix(*this);
return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealQZ.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealQZ.h
index b3a910dd9..509130184 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealQZ.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealQZ.h
@@ -90,8 +90,9 @@ namespace Eigen {
m_Z(size, size),
m_workspace(size*2),
m_maxIters(400),
- m_isInitialized(false)
- { }
+ m_isInitialized(false),
+ m_computeQZ(true)
+ {}
/** \brief Constructor; computes real QZ decomposition of given matrices
*
@@ -108,9 +109,11 @@ namespace Eigen {
m_Z(A.rows(),A.cols()),
m_workspace(A.rows()*2),
m_maxIters(400),
- m_isInitialized(false) {
- compute(A, B, computeQZ);
- }
+ m_isInitialized(false),
+ m_computeQZ(true)
+ {
+ compute(A, B, computeQZ);
+ }
/** \brief Returns matrix Q in the QZ decomposition.
*
@@ -161,7 +164,7 @@ namespace Eigen {
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealSchur.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealSchur.h
index f5c86041d..7304ef344 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealSchur.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/RealSchur.h
@@ -190,7 +190,7 @@ template<typename _MatrixType> class RealSchur
RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
@@ -236,7 +236,7 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
- Index findSmallSubdiagEntry(Index iu);
+ Index findSmallSubdiagEntry(Index iu, const Scalar& considerAsZero);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
@@ -270,8 +270,13 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const EigenBase<InputType>
// Step 1. Reduce to Hessenberg form
m_hess.compute(matrix.derived()/scale);
- // Step 2. Reduce to real Schur form
- computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
+ // Step 2. Reduce to real Schur form
+ // Note: we copy m_hess.matrixQ() into m_matU here and not in computeFromHessenberg
+ // to be able to pass our working-space buffer to the Householder-to-dense evaluation (evalTo).
+ m_workspaceVector.resize(matrix.cols());
+ if(computeU)
+ m_hess.matrixQ().evalTo(m_matU, m_workspaceVector);
+ computeFromHessenberg(m_hess.matrixH(), m_matU, computeU);
m_matT *= scale;
@@ -284,13 +289,13 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
using std::abs;
m_matT = matrixH;
- if(computeU)
+ m_workspaceVector.resize(m_matT.cols());
+ if(computeU && !internal::is_same_dense(m_matU,matrixQ))
m_matU = matrixQ;
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * matrixH.rows();
- m_workspaceVector.resize(m_matT.cols());
Scalar* workspace = &m_workspaceVector.coeffRef(0);
// The matrix m_matT is divided in three parts.
@@ -302,12 +307,16 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
Index totalIter = 0; // iteration count for whole matrix
Scalar exshift(0); // sum of exceptional shifts
Scalar norm = computeNormOfT();
+ // sub-diagonal entries smaller than considerAsZero will be treated as zero.
+ // We use eps^2 to retain more precision in small eigenvalues.
+ Scalar considerAsZero = numext::maxi<Scalar>( norm * numext::abs2(NumTraits<Scalar>::epsilon()),
+ (std::numeric_limits<Scalar>::min)() );
- if(norm!=0)
+ if(norm!=Scalar(0))
{
while (iu >= 0)
{
- Index il = findSmallSubdiagEntry(iu);
+ Index il = findSmallSubdiagEntry(iu,considerAsZero);
// Check for convergence
if (il == iu) // One root found
@@ -327,7 +336,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
else // No convergence yet
{
// The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )
- Vector3s firstHouseholderVector(0,0,0), shiftInfo;
+ Vector3s firstHouseholderVector = Vector3s::Zero(), shiftInfo;
computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1;
totalIter = totalIter + 1;
@@ -364,14 +373,17 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
-inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
+inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& considerAsZero)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
- if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
+
+ s = numext::maxi<Scalar>(s * NumTraits<Scalar>::epsilon(), considerAsZero);
+
+ if (abs(m_matT.coeff(res,res-1)) <= s)
break;
res--;
}
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 9ddd553f2..14692365f 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -20,7 +20,9 @@ class GeneralizedSelfAdjointEigenSolver;
namespace internal {
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
+
template<typename MatrixType, typename DiagType, typename SubDiagType>
+EIGEN_DEVICE_FUNC
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
}
@@ -42,10 +44,14 @@ ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag
* \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
* selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
* the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
- * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
- * matrices, the matrix \f$ V \f$ is always invertible). This is called the
+ * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$. This is called the
* eigendecomposition.
*
+ * For a selfadjoint matrix, \f$ V \f$ is unitary, meaning its inverse is equal
+ * to its adjoint, \f$ V^{-1} = V^{\dagger} \f$. If \f$ A \f$ is real, then
+ * \f$ V \f$ is also real and therefore orthogonal, meaning its inverse is
+ * equal to its transpose, \f$ V^{-1} = V^T \f$.
+ *
* The algorithm exploits the fact that the matrix is selfadjoint, making it
* faster and more accurate than the general purpose eigenvalue algorithms
* implemented in EigenSolver and ComplexEigenSolver.
@@ -119,7 +125,10 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
: m_eivec(),
m_eivalues(),
m_subdiag(),
- m_isInitialized(false)
+ m_hcoeffs(),
+ m_info(InvalidInput),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false)
{ }
/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
@@ -139,7 +148,9 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
: m_eivec(size, size),
m_eivalues(size),
m_subdiag(size > 1 ? size - 1 : 1),
- m_isInitialized(false)
+ m_hcoeffs(size > 1 ? size - 1 : 1),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
@@ -163,7 +174,9 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
- m_isInitialized(false)
+ m_hcoeffs(matrix.cols() > 1 ? matrix.cols() - 1 : 1),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false)
{
compute(matrix.derived(), options);
}
@@ -250,6 +263,11 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* matrix \f$ A \f$, then the matrix returned by this function is the
* matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
*
+ * For a selfadjoint matrix, \f$ V \f$ is unitary, meaning its inverse is equal
+ * to its adjoint, \f$ V^{-1} = V^{\dagger} \f$. If \f$ A \f$ is real, then
+ * \f$ V \f$ is also real and therefore orthogonal, meaning its inverse is
+ * equal to its transpose, \f$ V^{-1} = V^T \f$.
+ *
* Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
*
@@ -337,7 +355,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
EIGEN_DEVICE_FUNC
ComputationInfo info() const
@@ -354,7 +372,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
static const int m_maxIterations = 30;
protected:
- static void check_template_parameters()
+ static EIGEN_DEVICE_FUNC
+ void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
@@ -362,6 +381,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
EigenvectorsType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
+ typename TridiagonalizationType::CoeffVectorType m_hcoeffs;
ComputationInfo m_info;
bool m_isInitialized;
bool m_eigenvectorsOk;
@@ -403,7 +423,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
const InputType &matrix(a_matrix.derived());
- using std::abs;
+ EIGEN_USING_STD(abs);
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
@@ -434,7 +454,8 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
if(scale==RealScalar(0)) scale = RealScalar(1);
mat.template triangularView<Lower>() /= scale;
m_subdiag.resize(n-1);
- internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
+ m_hcoeffs.resize(n-1);
+ internal::tridiagonalization_inplace(mat, diag, m_subdiag, m_hcoeffs, computeEigenvectors);
m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
@@ -479,10 +500,9 @@ namespace internal {
* \returns \c Success or \c NoConvergence
*/
template<typename MatrixType, typename DiagType, typename SubDiagType>
+EIGEN_DEVICE_FUNC
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
{
- using std::abs;
-
ComputationInfo info;
typedef typename MatrixType::Scalar Scalar;
@@ -493,15 +513,23 @@ ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag
typedef typename DiagType::RealScalar RealScalar;
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
- const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
-
+ const RealScalar precision_inv = RealScalar(1)/NumTraits<RealScalar>::epsilon();
while (end>0)
{
- for (Index i = start; i<end; ++i)
- if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)
- subdiag[i] = 0;
+ for (Index i = start; i<end; ++i) {
+ if (numext::abs(subdiag[i]) < considerAsZero) {
+ subdiag[i] = RealScalar(0);
+ } else {
+ // abs(subdiag[i]) <= epsilon * sqrt(abs(diag[i]) + abs(diag[i+1]))
+ // Scaled to prevent underflows.
+ const RealScalar scaled_subdiag = precision_inv * subdiag[i];
+ if (scaled_subdiag * scaled_subdiag <= (numext::abs(diag[i])+numext::abs(diag[i+1]))) {
+ subdiag[i] = RealScalar(0);
+ }
+ }
+ }
- // find the largest unreduced block
+ // find the largest unreduced block at the end of the matrix.
while (end>0 && subdiag[end-1]==RealScalar(0))
{
end--;
@@ -535,7 +563,7 @@ ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag
diag.segment(i,n-i).minCoeff(&k);
if (k > 0)
{
- std::swap(diag[i], diag[k+i]);
+ numext::swap(diag[i], diag[k+i]);
if(computeEigenvectors)
eivec.col(i).swap(eivec.col(k+i));
}
@@ -566,10 +594,10 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
- EIGEN_USING_STD_MATH(sqrt)
- EIGEN_USING_STD_MATH(atan2)
- EIGEN_USING_STD_MATH(cos)
- EIGEN_USING_STD_MATH(sin)
+ EIGEN_USING_STD(sqrt)
+ EIGEN_USING_STD(atan2)
+ EIGEN_USING_STD(cos)
+ EIGEN_USING_STD(sin)
const Scalar s_inv3 = Scalar(1)/Scalar(3);
const Scalar s_sqrt3 = sqrt(Scalar(3));
@@ -605,7 +633,8 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
EIGEN_DEVICE_FUNC
static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
{
- using std::abs;
+ EIGEN_USING_STD(abs);
+ EIGEN_USING_STD(sqrt);
Index i0;
// Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
mat.diagonal().cwiseAbs().maxCoeff(&i0);
@@ -616,8 +645,8 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
VectorType c0, c1;
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
- if(n0>n1) res = c0/std::sqrt(n0);
- else res = c1/std::sqrt(n1);
+ if(n0>n1) res = c0/sqrt(n0);
+ else res = c1/sqrt(n1);
return true;
}
@@ -719,7 +748,7 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
- using std::sqrt;
+ EIGEN_USING_STD(sqrt);
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
@@ -729,8 +758,8 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
EIGEN_DEVICE_FUNC
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
- EIGEN_USING_STD_MATH(sqrt);
- EIGEN_USING_STD_MATH(abs);
+ EIGEN_USING_STD(sqrt);
+ EIGEN_USING_STD(abs);
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
@@ -803,32 +832,38 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
}
namespace internal {
+
+// Francis implicit QR step.
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
- using std::abs;
+ // Wilkinson Shift.
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e = subdiag[end-1];
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
// underflow thus leading to inf/NaN values when using the following commented code:
-// RealScalar e2 = numext::abs2(subdiag[end-1]);
-// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
+ // RealScalar e2 = numext::abs2(subdiag[end-1]);
+ // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
// This explains the following, somewhat more complicated, version:
RealScalar mu = diag[end];
- if(td==RealScalar(0))
- mu -= abs(e);
- else
- {
- RealScalar e2 = numext::abs2(subdiag[end-1]);
- RealScalar h = numext::hypot(td,e);
- if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
- else mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
+ if(td==RealScalar(0)) {
+ mu -= numext::abs(e);
+ } else if (e != RealScalar(0)) {
+ const RealScalar e2 = numext::abs2(e);
+ const RealScalar h = numext::hypot(td,e);
+ if(e2 == RealScalar(0)) {
+ mu -= e / ((td + (td>RealScalar(0) ? h : -h)) / e);
+ } else {
+ mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
+ }
}
-
+
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
- for (Index k = start; k < end; ++k)
+ // If z ever becomes zero, the Givens rotation will be the identity and
+ // z will stay zero for all future iterations.
+ for (Index k = start; k < end && z != RealScalar(0); ++k)
{
JacobiRotation<RealScalar> rot;
rot.makeGivens(x, z);
@@ -841,12 +876,11 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
-
if (k > start)
subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
+ // "Chasing the bulge" to return to triangular form.
x = subdiag[k];
-
if (k < end - 1)
{
z = -rot.s() * subdiag[k+1];
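
Two changes in this hunk are easy to miss: the loop guard z != RealScalar(0) is a pure optimization (a zero z yields an identity Givens rotation, so z would stay zero for the rest of the sweep), and the shift itself is reworked to dodge underflow. A standalone sketch of the shift logic, using std:: functions in place of Eigen's numext:: wrappers:

#include <cmath>

// Wilkinson shift: the eigenvalue of the trailing 2x2 tridiagonal block
// [d1 e; e d2] closest to d2, computed without forming e*e or td*td in a
// way that could underflow or overflow.
template <typename Real>
Real wilkinson_shift(Real d1, Real d2, Real e)
{
  const Real td = (d1 - d2) * Real(0.5);
  Real mu = d2;
  if (td == Real(0)) {
    mu -= std::abs(e);
  } else if (e != Real(0)) {
    const Real e2 = e * e;
    const Real h  = std::hypot(td, e);    // sqrt(td*td + e*e), overflow-safe
    if (e2 == Real(0))                    // e*e underflowed to zero
      mu -= e / ((td + (td > Real(0) ? h : -h)) / e);
    else
      mu -= e2 / (td + (td > Real(0) ? h : -h));
  }
  return mu;
}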
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
index 3891cf883..b0c947dc0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
@@ -37,7 +37,7 @@ namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
-#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW, LAPACKE_COLROW ) \
+#define EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW ) \
template<> template<typename InputType> inline \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, int options) \
@@ -47,7 +47,7 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
&& (options&EigVecMask)!=EigVecMask \
&& "invalid option parameter"); \
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
- lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, matrix_order, info; \
+ lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, info; \
m_eivalues.resize(n,1); \
m_subdiag.resize(n-1); \
m_eivec = matrix; \
@@ -63,27 +63,24 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
} \
\
lda = internal::convert_index<lapack_int>(m_eivec.outerStride()); \
- matrix_order=LAPACKE_COLROW; \
char jobz, uplo='L'/*, range='A'*/; \
jobz = computeEigenvectors ? 'V' : 'N'; \
\
- info = LAPACKE_##LAPACKE_NAME( matrix_order, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
+ info = LAPACKE_##LAPACKE_NAME( LAPACK_COL_MAJOR, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
m_info = (info==0) ? Success : NoConvergence; \
m_isInitialized = true; \
m_eigenvectorsOk = computeEigenvectors; \
return *this; \
}
+#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME ) \
+ EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, ColMajor ) \
+ EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, RowMajor )
-EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev, ColMajor, LAPACK_COL_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev, ColMajor, LAPACK_COL_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, ColMajor, LAPACK_COL_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev, ColMajor, LAPACK_COL_MAJOR)
-
-EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev, RowMajor, LAPACK_ROW_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev, RowMajor, LAPACK_ROW_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, RowMajor, LAPACK_ROW_MAJOR)
-EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev)
+EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev)
+EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev)
+EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev)
} // end namespace Eigen
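
The restructuring replaces eight hand-written instantiation lines with four; each new line expands into both storage orders. For the double case the preprocessor produces, in effect:

// EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev) expands to:
EIGEN_LAPACKE_EIG_SELFADJ_2(double, double, double, dsyev, ColMajor)
EIGEN_LAPACKE_EIG_SELFADJ_2(double, double, double, dsyev, RowMajor)
// i.e. one compute() specialization per storage order, both passing
// LAPACK_COL_MAJOR directly, as hardcoded in the rewritten macro body above.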
diff --git a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/Tridiagonalization.h b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/Tridiagonalization.h
index 1d102c17b..eda82794a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/Tridiagonalization.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -11,10 +11,10 @@
#ifndef EIGEN_TRIDIAGONALIZATION_H
#define EIGEN_TRIDIAGONALIZATION_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
-
+
template<typename MatrixType> struct TridiagonalizationMatrixTReturnType;
template<typename MatrixType>
struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
@@ -25,6 +25,7 @@ struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
};
template<typename MatrixType, typename CoeffVectorType>
+EIGEN_DEVICE_FUNC
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
}
@@ -344,6 +345,7 @@ namespace internal {
* \sa Tridiagonalization::packedMatrix()
*/
template<typename MatrixType, typename CoeffVectorType>
+EIGEN_DEVICE_FUNC
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
using numext::conj;
@@ -352,7 +354,7 @@ void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
Index n = matA.rows();
eigen_assert(n==matA.cols());
eigen_assert(n==hCoeffs.size()+1 || n==1);
-
+
for (Index i = 0; i<n-1; ++i)
{
Index remainingSize = n-i-1;
@@ -423,11 +425,13 @@ struct tridiagonalization_inplace_selector;
*
* \sa class Tridiagonalization
*/
-template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
-void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+template<typename MatrixType, typename DiagonalType, typename SubDiagonalType, typename CoeffVectorType>
+EIGEN_DEVICE_FUNC
+void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag,
+ CoeffVectorType& hcoeffs, bool extractQ)
{
eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
- tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
+ tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, hcoeffs, extractQ);
}
/** \internal
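
With the extra CoeffVectorType parameter, the caller now owns the Householder workspace, so the routine itself allocates nothing, which is what makes the EIGEN_DEVICE_FUNC annotation viable. A hedged usage sketch of this internal API (sizes for an n=5 problem; internal functions are subject to change):

#include <Eigen/Eigenvalues>

void tridiag_demo()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
  A = (A + A.transpose()).eval();          // make A self-adjoint
  Eigen::VectorXd diag(5), subdiag(4);
  Eigen::VectorXd hcoeffs(4);              // n-1 Householder coefficients, caller-allocated
  Eigen::internal::tridiagonalization_inplace(A, diag, subdiag, hcoeffs,
                                              /*extractQ=*/true);
  // On return, diag/subdiag hold the tridiagonal T and A holds Q.
}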
@@ -436,13 +440,12 @@ void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonal
template<typename MatrixType, int Size, bool IsComplex>
struct tridiagonalization_inplace_selector
{
- typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
- template<typename DiagonalType, typename SubDiagonalType>
- static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+ template<typename DiagonalType, typename SubDiagonalType, typename CoeffVectorType>
+ static EIGEN_DEVICE_FUNC
+ void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, CoeffVectorType& hCoeffs, bool extractQ)
{
- CoeffVectorType hCoeffs(mat.cols()-1);
- tridiagonalization_inplace(mat,hCoeffs);
+ tridiagonalization_inplace(mat, hCoeffs);
diag = mat.diagonal().real();
subdiag = mat.template diagonal<-1>().real();
if(extractQ)
@@ -462,8 +465,8 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
- template<typename DiagonalType, typename SubDiagonalType>
- static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+ template<typename DiagonalType, typename SubDiagonalType, typename CoeffVectorType>
+ static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, CoeffVectorType&, bool extractQ)
{
using std::sqrt;
const RealScalar tol = (std::numeric_limits<RealScalar>::min)();
@@ -507,8 +510,9 @@ struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
{
typedef typename MatrixType::Scalar Scalar;
- template<typename DiagonalType, typename SubDiagonalType>
- static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
+ template<typename DiagonalType, typename SubDiagonalType, typename CoeffVectorType>
+ static EIGEN_DEVICE_FUNC
+ void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, CoeffVectorType&, bool extractQ)
{
diag(0,0) = numext::real(mat(0,0));
if(extractQ)
@@ -542,8 +546,8 @@ template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
result.template diagonal<-1>() = m_matrix.template diagonal<-1>();
}
- Index rows() const { return m_matrix.rows(); }
- Index cols() const { return m_matrix.cols(); }
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
protected:
typename MatrixType::Nested m_matrix;
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/AlignedBox.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/AlignedBox.h
index c902d8f0a..55a9d0ae1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/AlignedBox.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/AlignedBox.h
@@ -7,10 +7,46 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+// Function void Eigen::AlignedBox::transform(const Transform& transform)
+// is provided under the following license agreement:
+//
+// Software License Agreement (BSD License)
+//
+// Copyright (c) 2011-2014, Willow Garage, Inc.
+// Copyright (c) 2014-2015, Open Source Robotics Foundation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Open Source Robotics Foundation nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
#ifndef EIGEN_ALIGNEDBOX_H
#define EIGEN_ALIGNEDBOX_H
-namespace Eigen {
+namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
@@ -231,7 +267,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
/** Returns an AlignedBox that is the union of \a b and \c *this.
- * \note Merging with an empty box may result in a box bigger than \c *this.
+ * \note Merging with an empty box may result in a box bigger than \c *this.
* \sa extend(const AlignedBox&) */
EIGEN_DEVICE_FUNC inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
@@ -246,6 +282,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
+ /** \returns a copy of \c *this translated by the vector \a t. */
+ template<typename Derived>
+ EIGEN_DEVICE_FUNC inline AlignedBox translated(const MatrixBase<Derived>& a_t) const
+ {
+ AlignedBox result(m_min, m_max);
+ result.translate(a_t);
+ return result;
+ }
+
/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
@@ -265,14 +310,63 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
*/
template<typename Derived>
EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
- { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(p))); }
+ { EIGEN_USING_STD(sqrt) return sqrt(NonInteger(squaredExteriorDistance(p))); }
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
*/
EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const AlignedBox& b) const
- { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(b))); }
+ { EIGEN_USING_STD(sqrt) return sqrt(NonInteger(squaredExteriorDistance(b))); }
+
+ /**
+ * Specialization of transform for pure translation.
+ */
+ template<int Mode, int Options>
+ EIGEN_DEVICE_FUNC inline void transform(
+ const typename Transform<Scalar, AmbientDimAtCompileTime, Mode, Options>::TranslationType& translation)
+ {
+ this->translate(translation);
+ }
+
+ /**
+ * Transforms this box by \a transform and recomputes it to
+ * still be an axis-aligned box.
+ *
+ * \note This method is provided under BSD license (see the top of this file).
+ */
+ template<int Mode, int Options>
+ EIGEN_DEVICE_FUNC inline void transform(const Transform<Scalar, AmbientDimAtCompileTime, Mode, Options>& transform)
+ {
+ // Only Affine and Isometry transforms are currently supported.
+ EIGEN_STATIC_ASSERT(Mode == Affine || Mode == AffineCompact || Mode == Isometry, THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS);
+
+ // Method adapted from FCL src/shape/geometric_shapes_utility.cpp#computeBV<AABB, Box>(...)
+ // https://github.com/flexible-collision-library/fcl/blob/fcl-0.4/src/shape/geometric_shapes_utility.cpp#L292
+ //
+ // Here's a nice explanation why it works: https://zeuxcg.org/2010/10/17/aabb-from-obb-with-component-wise-abs/
+
+ // two times rotated extent
+ const VectorType rotated_extent_2 = transform.linear().cwiseAbs() * sizes();
+ // two times new center
+ const VectorType rotated_center_2 = transform.linear() * (this->m_max + this->m_min) +
+ Scalar(2) * transform.translation();
+
+ this->m_max = (rotated_center_2 + rotated_extent_2) / Scalar(2);
+ this->m_min = (rotated_center_2 - rotated_extent_2) / Scalar(2);
+ }
+
+ /**
+ * \returns a copy of \c *this transformed by \a transform and recomputed to
+ * still be an axis-aligned box.
+ */
+ template<int Mode, int Options>
+ EIGEN_DEVICE_FUNC AlignedBox transformed(const Transform<Scalar, AmbientDimAtCompileTime, Mode, Options>& transform) const
+ {
+ AlignedBox result(m_min, m_max);
+ result.transform(transform);
+ return result;
+ }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
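
The cwiseAbs() trick above rests on a simple bound: the image of a box with center c and half-extent h under x -> R*x + t fits inside the box with center R*c + t and half-extent |R|*h, where |R| is taken component-wise. The same computation as a free function (a 3D double sketch, not the member above):

#include <Eigen/Geometry>

Eigen::AlignedBox3d transformed_aabb(const Eigen::AlignedBox3d& box,
                                     const Eigen::Affine3d& T)
{
  const Eigen::Vector3d center = T.linear() * box.center() + T.translation();
  // |R| * halfExtent bounds the rotated box along each axis.
  const Eigen::Vector3d half = T.linear().cwiseAbs() * (box.sizes() * 0.5);
  return Eigen::AlignedBox3d(center - half, center + half);
}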
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/AngleAxis.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/AngleAxis.h
index 83ee1be46..78328b6b5 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/AngleAxis.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/AngleAxis.h
@@ -169,8 +169,8 @@ template<typename Scalar>
template<typename QuatDerived>
EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
{
- EIGEN_USING_STD_MATH(atan2)
- EIGEN_USING_STD_MATH(abs)
+ EIGEN_USING_STD(atan2)
+ EIGEN_USING_STD(abs)
Scalar n = q.vec().norm();
if(n<NumTraits<Scalar>::epsilon())
n = q.vec().stableNorm();
@@ -217,8 +217,8 @@ template<typename Scalar>
typename AngleAxis<Scalar>::Matrix3
EIGEN_DEVICE_FUNC AngleAxis<Scalar>::toRotationMatrix(void) const
{
- EIGEN_USING_STD_MATH(sin)
- EIGEN_USING_STD_MATH(cos)
+ EIGEN_USING_STD(sin)
+ EIGEN_USING_STD(cos)
Matrix3 res;
Vector3 sin_axis = sin(m_angle) * m_axis;
Scalar c = cos(m_angle);
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/EulerAngles.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/EulerAngles.h
index c633268af..19b734ca7 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/EulerAngles.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/EulerAngles.h
@@ -36,9 +36,9 @@ template<typename Derived>
EIGEN_DEVICE_FUNC inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{
- EIGEN_USING_STD_MATH(atan2)
- EIGEN_USING_STD_MATH(sin)
- EIGEN_USING_STD_MATH(cos)
+ EIGEN_USING_STD(atan2)
+ EIGEN_USING_STD(sin)
+ EIGEN_USING_STD(cos)
/* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Homogeneous.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Homogeneous.h
index 5f0da1a9e..94083ac54 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Homogeneous.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Homogeneous.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_HOMOGENEOUS_H
#define EIGEN_HOMOGENEOUS_H
-namespace Eigen {
+namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
@@ -72,9 +72,11 @@ template<typename MatrixType,int _Direction> class Homogeneous
: m_matrix(matrix)
{}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
-
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
+
EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; }
template<typename Rhs>
@@ -262,8 +264,10 @@ struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
m_rhs(rhs)
{}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const
{
@@ -300,8 +304,8 @@ struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
: m_lhs(lhs), m_rhs(rhs)
{}
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const
{
@@ -322,7 +326,7 @@ template<typename ArgType,int Direction>
struct evaluator_traits<Homogeneous<ArgType,Direction> >
{
typedef typename storage_kind_to_evaluator_kind<typename ArgType::StorageKind>::Kind Kind;
- typedef HomogeneousShape Shape;
+ typedef HomogeneousShape Shape;
};
template<> struct AssignmentKind<DenseShape,HomogeneousShape> { typedef Dense2Dense Kind; };
@@ -414,7 +418,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, Homogeneous
typedef typename helper::ConstantBlock ConstantBlock;
typedef typename helper::Xpr RefactoredXpr;
typedef evaluator<RefactoredXpr> Base;
-
+
EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
: Base( xpr.lhs().nestedExpression() .lazyProduct( xpr.rhs().template topRows<helper::Dim>(xpr.lhs().nestedExpression().cols()) )
+ ConstantBlock(xpr.rhs().row(xpr.rhs().rows()-1),xpr.lhs().rows(), 1) )
@@ -467,7 +471,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
typedef typename helper::ConstantBlock ConstantBlock;
typedef typename helper::Xpr RefactoredXpr;
typedef evaluator<RefactoredXpr> Base;
-
+
EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
: Base( xpr.lhs().template leftCols<helper::Dim>(xpr.rhs().nestedExpression().rows()) .lazyProduct( xpr.rhs().nestedExpression() )
+ ConstantBlock(xpr.lhs().col(xpr.lhs().cols()-1),1,xpr.rhs().cols()) )
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Hyperplane.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Hyperplane.h
index 05929b299..cebe03557 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Hyperplane.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Hyperplane.h
@@ -119,7 +119,7 @@ public:
* If the dimension of the ambient space is greater than 2, then there isn't uniqueness,
* so an arbitrary choice is made.
*/
- // FIXME to be consitent with the rest this could be implemented as a static Through function ??
+ // FIXME to be consistent with the rest this could be implemented as a static Through function ??
EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
{
normal() = parametrized.direction().unitOrthogonal();
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/OrthoMethods.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/OrthoMethods.h
index a035e6310..524aebe1b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/OrthoMethods.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/OrthoMethods.h
@@ -27,9 +27,10 @@ namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
#ifndef EIGEN_PARSED_BY_DOXYGEN
-EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
#else
-inline typename MatrixBase<Derived>::PlainObject
+typename MatrixBase<Derived>::PlainObject
#endif
MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/ParametrizedLine.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/ParametrizedLine.h
index 3929ca87f..584f50087 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/ParametrizedLine.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/ParametrizedLine.h
@@ -87,7 +87,7 @@ public:
/** \returns the distance of a point \a p to its projection onto the line \c *this.
* \sa squaredDistance()
*/
- EIGEN_DEVICE_FUNC RealScalar distance(const VectorType& p) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(squaredDistance(p)); }
+ EIGEN_DEVICE_FUNC RealScalar distance(const VectorType& p) const { EIGEN_USING_STD(sqrt) return sqrt(squaredDistance(p)); }
/** \returns the projection of a point \a p onto the line \c *this. */
EIGEN_DEVICE_FUNC VectorType projection(const VectorType& p) const
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Quaternion.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Quaternion.h
index c3fd8c3e0..3259e592d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Quaternion.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Quaternion.h
@@ -141,7 +141,7 @@ class QuaternionBase : public RotationBase<Derived, 3>
template<class OtherDerived> EIGEN_DEVICE_FUNC Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;
/** \returns an equivalent 3x3 rotation matrix */
- EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix() const;
+ EIGEN_DEVICE_FUNC inline Matrix3 toRotationMatrix() const;
/** \returns the quaternion which transforms \a a into \a b through a rotation */
template<typename Derived1, typename Derived2>
@@ -158,6 +158,22 @@ class QuaternionBase : public RotationBase<Derived, 3>
template<class OtherDerived> EIGEN_DEVICE_FUNC Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;
+ /** \returns true if all coefficients of \c *this and \a other are exactly equal.
+ * \warning When using floating point scalar values you probably should rather use a
+ * fuzzy comparison such as isApprox()
+ * \sa isApprox(), operator!= */
+ template<class OtherDerived>
+ EIGEN_DEVICE_FUNC inline bool operator==(const QuaternionBase<OtherDerived>& other) const
+ { return coeffs() == other.coeffs(); }
+
+ /** \returns true if at least one pair of coefficients of \c *this and \a other is not exactly equal.
+ * \warning When using floating point scalar values you probably should rather use a
+ * fuzzy comparison such as isApprox()
+ * \sa isApprox(), operator== */
+ template<class OtherDerived>
+ EIGEN_DEVICE_FUNC inline bool operator!=(const QuaternionBase<OtherDerived>& other) const
+ { return coeffs() != other.coeffs(); }
+
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
@@ -169,20 +185,45 @@ class QuaternionBase : public RotationBase<Derived, 3>
/** return the result vector of \a v through the rotation*/
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
+ EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const;
+
+ #else
+
+ template<typename NewScalarType>
+ EIGEN_DEVICE_FUNC inline
+ typename internal::enable_if<internal::is_same<Scalar,NewScalarType>::value,const Derived&>::type cast() const
{
- return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());
+ return derived();
}
+ template<typename NewScalarType>
+ EIGEN_DEVICE_FUNC inline
+ typename internal::enable_if<!internal::is_same<Scalar,NewScalarType>::value,Quaternion<NewScalarType> >::type cast() const
+ {
+ return Quaternion<NewScalarType>(coeffs().template cast<NewScalarType>());
+ }
+ #endif
+
+#ifndef EIGEN_NO_IO
+ friend std::ostream& operator<<(std::ostream& s, const QuaternionBase<Derived>& q) {
+ s << q.x() << "i + " << q.y() << "j + " << q.z() << "k" << " + " << q.w();
+ return s;
+ }
+#endif
+
#ifdef EIGEN_QUATERNIONBASE_PLUGIN
# include EIGEN_QUATERNIONBASE_PLUGIN
#endif
+protected:
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(QuaternionBase)
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(QuaternionBase)
};
/***************************************************************************
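
The cast() split above is the standard enable_if dispatch: a same-type cast returns a const reference (no copy), any other type constructs a new quaternion. The pattern in isolation (C++11 spelling, generic names for illustration):

#include <type_traits>

template <typename To, typename From>
typename std::enable_if<std::is_same<From,To>::value, const From&>::type
cast_like(const From& x) { return x; }       // same scalar type: no-op, no copy

template <typename To, typename From>
typename std::enable_if<!std::is_same<From,To>::value, To>::type
cast_like(const From& x) { return To(x); }   // different type: convert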
@@ -276,6 +317,21 @@ public:
EIGEN_DEVICE_FUNC explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
{ m_coeffs = other.coeffs().template cast<Scalar>(); }
+#if EIGEN_HAS_RVALUE_REFERENCES
+ // We define a copy constructor, which means we don't get an implicit move constructor or assignment operator.
+ /** Default move constructor */
+ EIGEN_DEVICE_FUNC inline Quaternion(Quaternion&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)
+ : m_coeffs(std::move(other.coeffs()))
+ {}
+
+ /** Default move assignment operator */
+ EIGEN_DEVICE_FUNC Quaternion& operator=(Quaternion&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
+ {
+ m_coeffs = std::move(other.coeffs());
+ return *this;
+ }
+#endif
+
EIGEN_DEVICE_FUNC static Quaternion UnitRandom();
template<typename Derived1, typename Derived2>
@@ -504,8 +560,8 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator
template<class Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
{
- EIGEN_USING_STD_MATH(cos)
- EIGEN_USING_STD_MATH(sin)
+ EIGEN_USING_STD(cos)
+ EIGEN_USING_STD(sin)
Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
this->w() = cos(ha);
this->vec() = sin(ha) * aa.axis();
@@ -581,7 +637,7 @@ template<class Derived>
template<typename Derived1, typename Derived2>
EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
{
- EIGEN_USING_STD_MATH(sqrt)
+ EIGEN_USING_STD(sqrt)
Vector3 v0 = a.normalized();
Vector3 v1 = b.normalized();
Scalar c = v1.dot(v0);
@@ -622,13 +678,13 @@ EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::setFromTwoVectors(con
template<typename Scalar, int Options>
EIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::UnitRandom()
{
- EIGEN_USING_STD_MATH(sqrt)
- EIGEN_USING_STD_MATH(sin)
- EIGEN_USING_STD_MATH(cos)
+ EIGEN_USING_STD(sqrt)
+ EIGEN_USING_STD(sin)
+ EIGEN_USING_STD(cos)
const Scalar u1 = internal::random<Scalar>(0, 1),
u2 = internal::random<Scalar>(0, 2*EIGEN_PI),
u3 = internal::random<Scalar>(0, 2*EIGEN_PI);
- const Scalar a = sqrt(1 - u1),
+ const Scalar a = sqrt(Scalar(1) - u1),
b = sqrt(u1);
return Quaternion (a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3));
}
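
UnitRandom() is Shoemake's subgroup algorithm: with u1 uniform on [0,1] and u2, u3 uniform on [0,2*pi], the returned quaternion is uniform on the unit 3-sphere; writing sqrt(Scalar(1) - u1) keeps the subtraction in the quaternion's scalar type, which matters for non-builtin scalars such as half precision. A standalone sketch with <random>:

#include <cmath>
#include <random>

struct Quat { double w, x, y, z; };   // same coefficient order as Quaternion's constructor

Quat unit_random(std::mt19937& gen)
{
  const double pi = 3.14159265358979323846;
  std::uniform_real_distribution<double> unit(0.0, 1.0);
  const double u1 = unit(gen);
  const double u2 = 2.0 * pi * unit(gen);
  const double u3 = 2.0 * pi * unit(gen);
  const double a = std::sqrt(1.0 - u1), b = std::sqrt(u1);
  // Same ordering as the code above: (a sin u2, a cos u2, b sin u3, b cos u3).
  return Quat{a * std::sin(u2), a * std::cos(u2), b * std::sin(u3), b * std::cos(u3)};
}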
@@ -707,7 +763,7 @@ template <class OtherDerived>
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
- EIGEN_USING_STD_MATH(atan2)
+ EIGEN_USING_STD(atan2)
Quaternion<Scalar> d = (*this) * other.conjugate();
return Scalar(2) * atan2( d.vec().norm(), numext::abs(d.w()) );
}
@@ -725,8 +781,8 @@ template <class OtherDerived>
EIGEN_DEVICE_FUNC Quaternion<typename internal::traits<Derived>::Scalar>
QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const
{
- EIGEN_USING_STD_MATH(acos)
- EIGEN_USING_STD_MATH(sin)
+ EIGEN_USING_STD(acos)
+ EIGEN_USING_STD(sin)
const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
Scalar d = this->dot(other);
Scalar absD = numext::abs(d);
@@ -763,7 +819,7 @@ struct quaternionbase_assign_impl<Other,3,3>
template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& a_mat)
{
const typename internal::nested_eval<Other,2>::type mat(a_mat);
- EIGEN_USING_STD_MATH(sqrt)
+ EIGEN_USING_STD(sqrt)
// This algorithm comes from "Quaternion Calculus and Fast Animation",
// Ken Shoemake, 1987 SIGGRAPH course notes
Scalar t = mat.trace();
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Rotation2D.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Rotation2D.h
index 884b7d0ee..d0bd57569 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Rotation2D.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Rotation2D.h
@@ -175,7 +175,7 @@ template<typename Scalar>
template<typename Derived>
EIGEN_DEVICE_FUNC Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
{
- EIGEN_USING_STD_MATH(atan2)
+ EIGEN_USING_STD(atan2)
EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));
return *this;
@@ -187,8 +187,8 @@ template<typename Scalar>
typename Rotation2D<Scalar>::Matrix2
EIGEN_DEVICE_FUNC Rotation2D<Scalar>::toRotationMatrix(void) const
{
- EIGEN_USING_STD_MATH(sin)
- EIGEN_USING_STD_MATH(cos)
+ EIGEN_USING_STD(sin)
+ EIGEN_USING_STD(cos)
Scalar sinA = sin(m_angle);
Scalar cosA = cos(m_angle);
return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Scaling.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Scaling.h
index f58ca03d9..d352f1f2b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Scaling.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Scaling.h
@@ -14,7 +14,7 @@ namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
- * \class Scaling
+ * \class UniformScaling
*
* \brief Represents a generic uniform scaling transformation
*
@@ -29,6 +29,22 @@ namespace Eigen {
*
* \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
*/
+
+namespace internal
+{
+ // This helper allows nvcc+MSVC to properly parse this file.
+ // See bug 1412.
+ template <typename Scalar, int Dim, int Mode>
+ struct uniformscaling_times_affine_returntype
+ {
+ enum
+ {
+ NewMode = int(Mode) == int(Isometry) ? Affine : Mode
+ };
+ typedef Transform <Scalar, Dim, NewMode> type;
+ };
+}
+
template<typename _Scalar>
class UniformScaling
{
@@ -60,9 +76,11 @@ public:
/** Concatenates a uniform scaling and an affine transformation */
template<int Dim, int Mode, int Options>
- inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
+ inline typename
+ internal::uniformscaling_times_affine_returntype<Scalar,Dim,Mode>::type
+ operator* (const Transform<Scalar, Dim, Mode, Options>& t) const
{
- Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
+ typename internal::uniformscaling_times_affine_returntype<Scalar,Dim,Mode>::type res = t;
res.prescale(factor());
return res;
}
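
Behaviorally the helper preserves the old return-type rule: scaling an Isometry yields an Affine transform (scaling breaks the rigidity invariant), and every other mode is kept as-is. A quick usage check:

#include <Eigen/Geometry>

void scaling_mode_demo()
{
  Eigen::Isometry3d iso = Eigen::Isometry3d::Identity();
  // UniformScaling * Isometry decays to Affine, exactly what NewMode encodes.
  Eigen::Affine3d scaled = Eigen::Scaling(2.0) * iso;
  (void)scaled;
}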
@@ -70,7 +88,7 @@ public:
/** Concatenates a uniform scaling and a linear transformation matrix */
// TODO returns an expression
template<typename Derived>
- inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
+ inline typename Eigen::internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
{ return other * m_factor; }
template<typename Derived,int Dim>
@@ -110,7 +128,7 @@ public:
/** Concatenates a linear transformation matrix and a uniform scaling
* \relates UniformScaling
*/
-// NOTE this operator is defiend in MatrixBase and not as a friend function
+// NOTE this operator is defined in MatrixBase and not as a friend function
// of UniformScaling to fix an internal crash of Intel's ICC
template<typename Derived,typename Scalar>
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product)
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Transform.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Transform.h
index 2d36dfadf..52b8c2a4e 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Transform.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Transform.h
@@ -12,7 +12,7 @@
#ifndef EIGEN_TRANSFORM_H
#define EIGEN_TRANSFORM_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -47,7 +47,7 @@ struct transform_left_product_impl;
template< typename Lhs,
typename Rhs,
- bool AnyProjective =
+ bool AnyProjective =
transform_traits<Lhs>::IsProjective ||
transform_traits<Rhs>::IsProjective>
struct transform_transform_product_impl;
@@ -97,6 +97,9 @@ template<int Mode> struct transform_make_affine;
* - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
* - #Projective: the transformation is stored as a (Dim+1)^2 matrix
* without any assumption.
+ * - #Isometry: same as #Affine with the additional assumption that
+ * the linear part represents a rotation. This assumption is exploited
+ * to speed up some functions such as inverse() and rotation().
* \tparam _Options has the same meaning as in class Matrix. It allows specifying DontAlign and/or RowMajor.
* These Options are passed directly to the underlying matrix type.
*
@@ -115,7 +118,7 @@ template<int Mode> struct transform_make_affine;
* \end{array} \right) \f$
*
* Note that for a projective transformation the last row can be anything,
- * and then the interpretation of different parts might be sightly different.
+ * and then the interpretation of different parts might be slightly different.
*
* However, unlike a plain matrix, the Transform class provides many features
* simplifying both its assembly and usage. In particular, it can be composed
@@ -220,9 +223,9 @@ public:
/** type of the matrix used to represent the linear part of the transformation */
typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
/** type of read/write reference to the linear part of the transformation */
- typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
+ typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (int(Options)&RowMajor)==0> LinearPart;
/** type of read reference to the linear part of the transformation */
- typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
+ typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (int(Options)&RowMajor)==0> ConstLinearPart;
/** type of read/write reference to the affine part of the transformation */
typedef typename internal::conditional<int(Mode)==int(AffineCompact),
MatrixType&,
@@ -239,7 +242,7 @@ public:
typedef const Block<ConstMatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> ConstTranslationPart;
/** corresponding translation type */
typedef Translation<Scalar,Dim> TranslationType;
-
+
// this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0
enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };
/** The return type of the product between a diagonal matrix and a transform */
@@ -252,17 +255,11 @@ protected:
public:
/** Default constructor without initialization of the meaningful coefficients.
- * If Mode==Affine, then the last row is set to [0 ... 0 1] */
+ * If Mode==Affine or Mode==Isometry, then the last row is set to [0 ... 0 1] */
EIGEN_DEVICE_FUNC inline Transform()
{
check_template_params();
- internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
- }
-
- EIGEN_DEVICE_FUNC inline Transform(const Transform& other)
- {
- check_template_params();
- m_matrix = other.m_matrix;
+ internal::transform_make_affine<(int(Mode)==Affine || int(Mode)==Isometry) ? Affine : AffineCompact>::run(m_matrix);
}
EIGEN_DEVICE_FUNC inline explicit Transform(const TranslationType& t)
@@ -282,9 +279,6 @@ public:
*this = r;
}
- EIGEN_DEVICE_FUNC inline Transform& operator=(const Transform& other)
- { m_matrix = other.m_matrix; return *this; }
-
typedef internal::transform_take_affine_part<Transform> take_affine_part;
/** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
@@ -308,7 +302,7 @@ public:
internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
return *this;
}
-
+
template<int OtherOptions>
EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)
{
@@ -380,9 +374,9 @@ public:
inline Transform& operator=(const QTransform& other);
inline QTransform toQTransform(void) const;
#endif
-
- EIGEN_DEVICE_FUNC Index rows() const { return int(Mode)==int(Projective) ? m_matrix.cols() : (m_matrix.cols()-1); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_matrix.cols(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return int(Mode)==int(Projective) ? m_matrix.cols() : (m_matrix.cols()-1); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
/** shortcut for m_matrix(row,col);
* \sa MatrixBase::operator(Index,Index) const */
@@ -456,7 +450,7 @@ public:
/** \returns The product expression of a transform \a a times a diagonal matrix \a b
*
* The rhs diagonal matrix is interpreted as an affine scaling transformation. The
- * product results in a Transform of the same type (mode) as the lhs only if the lhs
+ * product results in a Transform of the same type (mode) as the lhs only if the lhs
* mode is not an isometry. In that case, the returned transform is an affinity.
*/
template<typename DiagonalDerived>
@@ -471,7 +465,7 @@ public:
/** \returns The product expression of a diagonal matrix \a a times a transform \a b
*
* The lhs diagonal matrix is interpreted as an affine scaling transformation. The
- * product results in a Transform of the same type (mode) as the lhs only if the lhs
+ * product results in a Transform of the same type (mode) as the lhs only if the lhs
* mode is not an isometry. In that case, the returned transform is an affinity.
*/
template<typename DiagonalDerived>
@@ -494,7 +488,7 @@ public:
{
return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);
}
-
+
#if EIGEN_COMP_ICC
private:
// this intermediate structure permits to workaround a bug in ICC 11:
@@ -503,13 +497,13 @@ private:
// (the meaning of a name may have changed since the template declaration -- the type of the template is:
// "Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>,
// Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const")
- //
+ //
template<int OtherMode,int OtherOptions> struct icc_11_workaround
{
typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType;
typedef typename ProductType::ResultType ResultType;
};
-
+
public:
/** Concatenates two different transformations */
template<int OtherMode,int OtherOptions>
@@ -542,7 +536,7 @@ public:
}
template<typename OtherDerived>
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
inline Transform& scale(const MatrixBase<OtherDerived> &other);
template<typename OtherDerived>
@@ -572,18 +566,18 @@ public:
EIGEN_DEVICE_FUNC Transform& preshear(const Scalar& sx, const Scalar& sy);
EIGEN_DEVICE_FUNC inline Transform& operator=(const TranslationType& t);
-
+
EIGEN_DEVICE_FUNC
inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
-
+
EIGEN_DEVICE_FUNC inline Transform operator*(const TranslationType& t) const;
- EIGEN_DEVICE_FUNC
+ EIGEN_DEVICE_FUNC
inline Transform& operator=(const UniformScaling<Scalar>& t);
-
+
EIGEN_DEVICE_FUNC
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
-
+
EIGEN_DEVICE_FUNC
inline TransformTimeDiagonalReturnType operator*(const UniformScaling<Scalar>& s) const
{
@@ -602,7 +596,9 @@ public:
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
- EIGEN_DEVICE_FUNC const LinearMatrixType rotation() const;
+ typedef typename internal::conditional<int(Mode)==Isometry,ConstLinearPart,const LinearMatrixType>::type RotationReturnType;
+ EIGEN_DEVICE_FUNC RotationReturnType rotation() const;
+
template<typename RotationMatrixType, typename ScalingMatrixType>
EIGEN_DEVICE_FUNC
void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
@@ -684,7 +680,7 @@ public:
#ifdef EIGEN_TRANSFORM_PLUGIN
#include EIGEN_TRANSFORM_PLUGIN
#endif
-
+
protected:
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void check_template_params()
@@ -1046,20 +1042,43 @@ EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim
*** Special functions ***
************************/
+namespace internal {
+template<int Mode> struct transform_rotation_impl {
+ template<typename TransformType>
+ EIGEN_DEVICE_FUNC static inline
+ const typename TransformType::LinearMatrixType run(const TransformType& t)
+ {
+ typedef typename TransformType::LinearMatrixType LinearMatrixType;
+ LinearMatrixType result;
+ t.computeRotationScaling(&result, (LinearMatrixType*)0);
+ return result;
+ }
+};
+template<> struct transform_rotation_impl<Isometry> {
+ template<typename TransformType>
+ EIGEN_DEVICE_FUNC static inline
+ typename TransformType::ConstLinearPart run(const TransformType& t)
+ {
+ return t.linear();
+ }
+};
+}
/** \returns the rotation part of the transformation
*
+ * If Mode==Isometry, then this method is an alias for linear(),
+ * otherwise it calls computeRotationScaling() to extract the rotation
+ * through an SVD decomposition.
*
* \svd_module
*
* \sa computeRotationScaling(), computeScalingRotation(), class SVD
*/
template<typename Scalar, int Dim, int Mode, int Options>
-EIGEN_DEVICE_FUNC const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
+EIGEN_DEVICE_FUNC
+typename Transform<Scalar,Dim,Mode,Options>::RotationReturnType
Transform<Scalar,Dim,Mode,Options>::rotation() const
{
- LinearMatrixType result;
- computeRotationScaling(&result, (LinearMatrixType*)0);
- return result;
+ return internal::transform_rotation_impl<Mode>::run(*this);
}
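
Practical consequence of RotationReturnType: on an Isometry, rotation() is now a free read of linear(); on a general Affine transform it still runs the small SVD via computeRotationScaling(). Sketch:

#include <Eigen/Geometry>

void rotation_demo(const Eigen::Isometry3d& iso, const Eigen::Affine3d& aff)
{
  Eigen::Matrix3d R1 = iso.rotation();   // alias for linear(): no SVD
  Eigen::Matrix3d R2 = aff.rotation();   // polar decomposition through JacobiSVD
  (void)R1; (void)R2;
}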
@@ -1078,17 +1097,18 @@ template<typename Scalar, int Dim, int Mode, int Options>
template<typename RotationMatrixType, typename ScalingMatrixType>
EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
{
+ // Note that JacobiSVD is faster than BDCSVD for small matrices.
JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
- Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+ Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant() < Scalar(0) ? Scalar(-1) : Scalar(1); // so x has absolute value 1
VectorType sv(svd.singularValues());
- sv.coeffRef(0) *= x;
- if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());
+ sv.coeffRef(Dim-1) *= x;
+ if(scaling) *scaling = svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint();
if(rotation)
{
LinearMatrixType m(svd.matrixU());
- m.col(0) /= x;
- rotation->lazyAssign(m * svd.matrixV().adjoint());
+ m.col(Dim-1) *= x;
+ *rotation = m * svd.matrixV().adjoint();
}
}
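
The sign handling changed in two ways worth spelling out. In outline (comments only; the s_i are the singular values in decreasing order):

// Given linear() = U * S * V^T with S = diag(s_0 >= ... >= s_{Dim-1} >= 0):
//   x        = sign(det(U * V^T))             // snapped to exactly +1/-1, instead of
//                                             // dividing by a determinant that only
//                                             // approximates +/-1 in floating point
//   rotation = U * diag(1,...,1,x) * V^T      // det(rotation) == +1 by construction
//   scaling  = V * diag(s_0,...,s_{Dim-1}*x) * V^T
// Either column could carry the sign without breaking rotation*scaling == linear();
// attaching it to the smallest singular value (col(Dim-1), not col(0)) perturbs
// the factors least.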
@@ -1107,17 +1127,18 @@ template<typename Scalar, int Dim, int Mode, int Options>
template<typename ScalingMatrixType, typename RotationMatrixType>
EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
{
+ // Note that JacobiSVD is faster than BDCSVD for small matrices.
JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
- Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+ Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant() < Scalar(0) ? Scalar(-1) : Scalar(1); // so x has absolute value 1
VectorType sv(svd.singularValues());
- sv.coeffRef(0) *= x;
- if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());
+ sv.coeffRef(Dim-1) *= x;
+ if(scaling) *scaling = svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint();
if(rotation)
{
LinearMatrixType m(svd.matrixU());
- m.col(0) /= x;
- rotation->lazyAssign(m * svd.matrixV().adjoint());
+ m.col(Dim-1) *= x;
+ *rotation = m * svd.matrixV().adjoint();
}
}
@@ -1156,7 +1177,7 @@ struct transform_make_affine<AffineCompact>
{
template<typename MatrixType> EIGEN_DEVICE_FUNC static void run(MatrixType &) { }
};
-
+
// selector needed to avoid taking the inverse of a 3x4 matrix
template<typename TransformType, int Mode=TransformType::Mode>
struct projective_transform_inverse
@@ -1297,8 +1318,8 @@ struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HD
template<int LhsMode,int RhsMode>
struct transform_product_result
{
- enum
- {
+ enum
+ {
Mode =
(LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective :
(LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine :
@@ -1312,7 +1333,7 @@ struct transform_right_product_impl< TransformType, MatrixType, 0, RhsCols>
{
typedef typename MatrixType::PlainObject ResultType;
- static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
return T.matrix() * other;
}
@@ -1321,8 +1342,8 @@ struct transform_right_product_impl< TransformType, MatrixType, 0, RhsCols>
template< typename TransformType, typename MatrixType, int RhsCols>
struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols>
{
- enum {
- Dim = TransformType::Dim,
+ enum {
+ Dim = TransformType::Dim,
HDim = TransformType::HDim,
OtherRows = MatrixType::RowsAtCompileTime,
OtherCols = MatrixType::ColsAtCompileTime
@@ -1330,7 +1351,7 @@ struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols>
typedef typename MatrixType::PlainObject ResultType;
- static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
@@ -1339,7 +1360,7 @@ struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols>
ResultType res(other.rows(),other.cols());
TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;
res.row(OtherRows-1) = other.row(OtherRows-1);
-
+
return res;
}
};
@@ -1347,8 +1368,8 @@ struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols>
template< typename TransformType, typename MatrixType, int RhsCols>
struct transform_right_product_impl< TransformType, MatrixType, 2, RhsCols>
{
- enum {
- Dim = TransformType::Dim,
+ enum {
+ Dim = TransformType::Dim,
HDim = TransformType::HDim,
OtherRows = MatrixType::RowsAtCompileTime,
OtherCols = MatrixType::ColsAtCompileTime
@@ -1356,7 +1377,7 @@ struct transform_right_product_impl< TransformType, MatrixType, 2, RhsCols>
typedef typename MatrixType::PlainObject ResultType;
- static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
@@ -1381,7 +1402,7 @@ struct transform_right_product_impl< TransformType, MatrixType, 2, 1> // rhs is
typedef typename MatrixType::PlainObject ResultType;
- static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Translation.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Translation.h
index 51d9a82eb..8c2290121 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Translation.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Translation.h
@@ -70,18 +70,18 @@ public:
/** Constructs and initializes the translation transformation from a vector of translation coefficients */
EIGEN_DEVICE_FUNC explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
- /** \brief Retruns the x-translation by value. **/
+ /** \brief Returns the x-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar x() const { return m_coeffs.x(); }
- /** \brief Retruns the y-translation by value. **/
+ /** \brief Returns the y-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar y() const { return m_coeffs.y(); }
- /** \brief Retruns the z-translation by value. **/
+ /** \brief Returns the z-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar z() const { return m_coeffs.z(); }
- /** \brief Retruns the x-translation as a reference. **/
+ /** \brief Returns the x-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& x() { return m_coeffs.x(); }
- /** \brief Retruns the y-translation as a reference. **/
+ /** \brief Returns the y-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& y() { return m_coeffs.y(); }
- /** \brief Retruns the z-translation as a reference. **/
+ /** \brief Returns the z-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& z() { return m_coeffs.z(); }
EIGEN_DEVICE_FUNC const VectorType& vector() const { return m_coeffs; }
@@ -138,12 +138,6 @@ public:
/** \returns the inverse translation (opposite) */
Translation inverse() const { return Translation(-m_coeffs); }
- Translation& operator=(const Translation& other)
- {
- m_coeffs = other.m_coeffs;
- return *this;
- }
-
static const Translation Identity() { return Translation(VectorType::Zero()); }
/** \returns \c *this with scalar type casted to \a NewScalarType
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/Umeyama.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/Umeyama.h
index 7e933fca1..6b755008f 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/Umeyama.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/Umeyama.h
@@ -87,7 +87,7 @@ struct umeyama_transform_matrix_type
* \f{align*}
* T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
* \f}
-* minimizing the resudiual above. This transformation is always returned as an
+* minimizing the residual above. This transformation is always returned as an
* Eigen::Matrix.
*/
template <typename Derived, typename OtherDerived>
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SIMD.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SIMD.h
new file mode 100644
index 000000000..9af6a9af7
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SIMD.h
@@ -0,0 +1,168 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GEOMETRY_SIMD_H
+#define EIGEN_GEOMETRY_SIMD_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::Target, Derived, OtherDerived, float>
+{
+ enum {
+ AAlignment = traits<Derived>::Alignment,
+ BAlignment = traits<OtherDerived>::Alignment,
+ ResAlignment = traits<Quaternion<float> >::Alignment
+ };
+ static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
+ {
+ evaluator<typename Derived::Coefficients> ae(_a.coeffs());
+ evaluator<typename OtherDerived::Coefficients> be(_b.coeffs());
+ Quaternion<float> res;
+ const float neg_zero = numext::bit_cast<float>(0x80000000u);
+ const float arr[4] = {0.f, 0.f, 0.f, neg_zero};
+ const Packet4f mask = ploadu<Packet4f>(arr);
+ Packet4f a = ae.template packet<AAlignment,Packet4f>(0);
+ Packet4f b = be.template packet<BAlignment,Packet4f>(0);
+ Packet4f s1 = pmul(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));
+ Packet4f s2 = pmul(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));
+ pstoret<float,Packet4f,ResAlignment>(
+ &res.x(),
+ padd(psub(pmul(a,vec4f_swizzle1(b,3,3,3,3)),
+ pmul(vec4f_swizzle1(a,2,0,1,0),
+ vec4f_swizzle1(b,1,2,0,0))),
+ pxor(mask,padd(s1,s2))));
+
+ return res;
+ }
+};
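
For reference, the scalar Hamilton product these swizzles implement, with coefficients stored in (x, y, z, w) order and w the real part:

// res.x = a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y
// res.y = a.w*b.y + a.y*b.w + a.z*b.x - a.x*b.z
// res.z = a.w*b.z + a.z*b.w + a.x*b.y - a.y*b.x
// res.w = a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z
// The mask (0, 0, 0, -0.f) applied through pxor flips the sign of the s1+s2
// contribution in the w lane only, turning the shared add into the subtraction
// the w component needs.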
+
+template<class Derived>
+struct quat_conj<Architecture::Target, Derived, float>
+{
+ enum {
+ ResAlignment = traits<Quaternion<float> >::Alignment
+ };
+ static inline Quaternion<float> run(const QuaternionBase<Derived>& q)
+ {
+ evaluator<typename Derived::Coefficients> qe(q.coeffs());
+ Quaternion<float> res;
+ const float neg_zero = numext::bit_cast<float>(0x80000000u);
+ const float arr[4] = {neg_zero, neg_zero, neg_zero,0.f};
+ const Packet4f mask = ploadu<Packet4f>(arr);
+ pstoret<float,Packet4f,ResAlignment>(&res.x(), pxor(mask, qe.template packet<traits<Derived>::Alignment,Packet4f>(0)));
+ return res;
+ }
+};
+
+
+template<typename VectorLhs,typename VectorRhs>
+struct cross3_impl<Architecture::Target,VectorLhs,VectorRhs,float,true>
+{
+ enum {
+ ResAlignment = traits<typename plain_matrix_type<VectorLhs>::type>::Alignment
+ };
+ static inline typename plain_matrix_type<VectorLhs>::type
+ run(const VectorLhs& lhs, const VectorRhs& rhs)
+ {
+ evaluator<VectorLhs> lhs_eval(lhs);
+ evaluator<VectorRhs> rhs_eval(rhs);
+ Packet4f a = lhs_eval.template packet<traits<VectorLhs>::Alignment,Packet4f>(0);
+ Packet4f b = rhs_eval.template packet<traits<VectorRhs>::Alignment,Packet4f>(0);
+ Packet4f mul1 = pmul(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
+ Packet4f mul2 = pmul(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
+ typename plain_matrix_type<VectorLhs>::type res;
+ pstoret<float,Packet4f,ResAlignment>(&res.x(),psub(mul1,mul2));
+ return res;
+ }
+};
+
+
+
+#if (defined EIGEN_VECTORIZE_SSE) || (EIGEN_ARCH_ARM64)
+
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::Target, Derived, OtherDerived, double>
+{
+ enum {
+ BAlignment = traits<OtherDerived>::Alignment,
+ ResAlignment = traits<Quaternion<double> >::Alignment
+ };
+
+ static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
+ {
+ Quaternion<double> res;
+
+ evaluator<typename Derived::Coefficients> ae(_a.coeffs());
+ evaluator<typename OtherDerived::Coefficients> be(_b.coeffs());
+
+ const double* a = _a.coeffs().data();
+ Packet2d b_xy = be.template packet<BAlignment,Packet2d>(0);
+ Packet2d b_zw = be.template packet<BAlignment,Packet2d>(2);
+ Packet2d a_xx = pset1<Packet2d>(a[0]);
+ Packet2d a_yy = pset1<Packet2d>(a[1]);
+ Packet2d a_zz = pset1<Packet2d>(a[2]);
+ Packet2d a_ww = pset1<Packet2d>(a[3]);
+
+ // two temporaries:
+ Packet2d t1, t2;
+
+ /*
+ * t1 = ww*xy + yy*zw
+ * t2 = zz*xy - xx*zw
+ * res.xy = t1 +/- swap(t2)
+ */
+ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
+ t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));
+ pstoret<double,Packet2d,ResAlignment>(&res.x(), paddsub(t1, preverse(t2)));
+
+ /*
+ * t1 = ww*zw - yy*xy
+ * t2 = zz*zw + xx*xy
+ * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2)
+ */
+ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy));
+ t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
+ pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(paddsub(preverse(t1), t2)));
+
+ return res;
+}
+};
+
+template<class Derived>
+struct quat_conj<Architecture::Target, Derived, double>
+{
+ enum {
+ ResAlignment = traits<Quaternion<double> >::Alignment
+ };
+ static inline Quaternion<double> run(const QuaternionBase<Derived>& q)
+ {
+ evaluator<typename Derived::Coefficients> qe(q.coeffs());
+ Quaternion<double> res;
+ const double neg_zero = numext::bit_cast<double>(0x8000000000000000ull);
+ const double arr1[2] = {neg_zero, neg_zero};
+ const double arr2[2] = {neg_zero, 0.0};
+ const Packet2d mask0 = ploadu<Packet2d>(arr1);
+ const Packet2d mask2 = ploadu<Packet2d>(arr2);
+ pstoret<double,Packet2d,ResAlignment>(&res.x(), pxor(mask0, qe.template packet<traits<Derived>::Alignment,Packet2d>(0)));
+ pstoret<double,Packet2d,ResAlignment>(&res.z(), pxor(mask2, qe.template packet<traits<Derived>::Alignment,Packet2d>(2)));
+ return res;
+ }
+};
+
+#endif // end EIGEN_VECTORIZE_SSE_OR_EIGEN_ARCH_ARM64
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GEOMETRY_SIMD_H
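
The kernels in the new file are never called by name; Eigen's dispatch machinery selects them from ordinary Quaternion and vector expressions whenever vectorization is enabled for the target. A minimal sketch of the public-API call sites (values are illustrative):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  // Products and conjugates of float quaternions route through the
  // quat_product / quat_conj specializations above.
  Eigen::Quaternionf a(Eigen::AngleAxisf(0.50f, Eigen::Vector3f::UnitZ()));
  Eigen::Quaternionf b(Eigen::AngleAxisf(0.25f, Eigen::Vector3f::UnitX()));
  Eigen::Quaternionf ab = a * b;
  Eigen::Quaternionf ac = a.conjugate();

  // cross3() on 4-component vectors (w is ignored) routes through cross3_impl.
  Eigen::Vector4f u(1.f, 0.f, 0.f, 0.f), v(0.f, 1.f, 0.f, 0.f);
  Eigen::Vector4f w = u.cross3(v);

  std::cout << ab.coeffs().transpose() << "\n"
            << ac.coeffs().transpose() << "\n"
            << w.transpose() << std::endl;
}
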
diff --git a/examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SSE.h b/examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SSE.h
deleted file mode 100644
index f68cab583..000000000
--- a/examples/ThirdPartyLibs/Eigen/src/Geometry/arch/Geometry_SSE.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>
-// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_GEOMETRY_SSE_H
-#define EIGEN_GEOMETRY_SSE_H
-
-namespace Eigen {
-
-namespace internal {
-
-template<class Derived, class OtherDerived>
-struct quat_product<Architecture::SSE, Derived, OtherDerived, float>
-{
- enum {
- AAlignment = traits<Derived>::Alignment,
- BAlignment = traits<OtherDerived>::Alignment,
- ResAlignment = traits<Quaternion<float> >::Alignment
- };
- static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
- {
- Quaternion<float> res;
- const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f);
- __m128 a = _a.coeffs().template packet<AAlignment>(0);
- __m128 b = _b.coeffs().template packet<BAlignment>(0);
- __m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));
- __m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));
- pstoret<float,Packet4f,ResAlignment>(
- &res.x(),
- _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)),
- _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0),
- vec4f_swizzle1(b,1,2,0,0))),
- _mm_xor_ps(mask,_mm_add_ps(s1,s2))));
-
- return res;
- }
-};
-
-template<class Derived>
-struct quat_conj<Architecture::SSE, Derived, float>
-{
- enum {
- ResAlignment = traits<Quaternion<float> >::Alignment
- };
- static inline Quaternion<float> run(const QuaternionBase<Derived>& q)
- {
- Quaternion<float> res;
- const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f);
- pstoret<float,Packet4f,ResAlignment>(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
- return res;
- }
-};
-
-
-template<typename VectorLhs,typename VectorRhs>
-struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
-{
- enum {
- ResAlignment = traits<typename plain_matrix_type<VectorLhs>::type>::Alignment
- };
- static inline typename plain_matrix_type<VectorLhs>::type
- run(const VectorLhs& lhs, const VectorRhs& rhs)
- {
- __m128 a = lhs.template packet<traits<VectorLhs>::Alignment>(0);
- __m128 b = rhs.template packet<traits<VectorRhs>::Alignment>(0);
- __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
- __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
- typename plain_matrix_type<VectorLhs>::type res;
- pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2));
- return res;
- }
-};
-
-
-
-
-template<class Derived, class OtherDerived>
-struct quat_product<Architecture::SSE, Derived, OtherDerived, double>
-{
- enum {
- BAlignment = traits<OtherDerived>::Alignment,
- ResAlignment = traits<Quaternion<double> >::Alignment
- };
-
- static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
- {
- const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
-
- Quaternion<double> res;
-
- const double* a = _a.coeffs().data();
- Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0);
- Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2);
- Packet2d a_xx = pset1<Packet2d>(a[0]);
- Packet2d a_yy = pset1<Packet2d>(a[1]);
- Packet2d a_zz = pset1<Packet2d>(a[2]);
- Packet2d a_ww = pset1<Packet2d>(a[3]);
-
- // two temporaries:
- Packet2d t1, t2;
-
- /*
- * t1 = ww*xy + yy*zw
- * t2 = zz*xy - xx*zw
- * res.xy = t1 +/- swap(t2)
- */
- t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
- t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));
-#ifdef EIGEN_VECTORIZE_SSE3
- EIGEN_UNUSED_VARIABLE(mask)
- pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_addsub_pd(t1, preverse(t2)));
-#else
- pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2))));
-#endif
-
- /*
- * t1 = ww*zw - yy*xy
- * t2 = zz*zw + xx*xy
- * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2)
- */
- t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy));
- t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
-#ifdef EIGEN_VECTORIZE_SSE3
- EIGEN_UNUSED_VARIABLE(mask)
- pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));
-#else
- pstoret<double,Packet2d,ResAlignment>(&res.z(), psub(t1, pxor(mask,preverse(t2))));
-#endif
-
- return res;
-}
-};
-
-template<class Derived>
-struct quat_conj<Architecture::SSE, Derived, double>
-{
- enum {
- ResAlignment = traits<Quaternion<double> >::Alignment
- };
- static inline Quaternion<double> run(const QuaternionBase<Derived>& q)
- {
- Quaternion<double> res;
- const __m128d mask0 = _mm_setr_pd(-0.,-0.);
- const __m128d mask2 = _mm_setr_pd(-0.,0.);
- pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
- pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2)));
- return res;
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_GEOMETRY_SSE_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/Householder/BlockHouseholder.h b/examples/ThirdPartyLibs/Eigen/src/Householder/BlockHouseholder.h
index 01a7ed188..39ce1c2a0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Householder/BlockHouseholder.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Householder/BlockHouseholder.h
@@ -63,8 +63,15 @@ void make_block_householder_triangular_factor(TriangularFactorType& triFactor, c
triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint()
* vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>();
- // FIXME add .noalias() once the triangular product can work inplace
- triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
+ // FIXME use the following line with .noalias() once the triangular product can work inplace
+ // triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
+ for(Index j=nbVecs-1; j>i; --j)
+ {
+ typename TriangularFactorType::Scalar z = triFactor(i,j);
+ triFactor(i,j) = z * triFactor(j,j);
+ if(nbVecs-j-1>0)
+ triFactor.row(i).tail(nbVecs-j-1) += z * triFactor.row(j).tail(nbVecs-j-1);
+ }
}
triFactor(i,i) = hCoeffs(i);
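
The replacement loop computes r := r * U in place for an upper-triangular U: it walks the columns right to left, so each coefficient is read before any scatter lands on it, and the scatters only write to entries that were already finalized. A standalone sketch of the same scheme, checked against the out-of-place product (sizes are arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int n = 6;
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd U = M.triangularView<Eigen::Upper>();  // dense upper-triangular copy
  Eigen::RowVectorXd r = Eigen::RowVectorXd::Random(n);
  Eigen::RowVectorXd ref = r * U;                        // out-of-place reference

  // In-place r := r * U, mirroring the new loop above: r(j) is still
  // unmodified when read, and the scatter writes only to the right of j.
  for (int j = n - 1; j >= 0; --j)
  {
    double z = r(j);
    r(j) = z * U(j, j);
    if (n - j - 1 > 0)
      r.tail(n - j - 1) += z * U.row(j).tail(n - j - 1);
  }
  std::cout << (r - ref).norm() << std::endl;            // ~0
}
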
diff --git a/examples/ThirdPartyLibs/Eigen/src/Householder/Householder.h b/examples/ThirdPartyLibs/Eigen/src/Householder/Householder.h
index 80de2c305..5bc037f00 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Householder/Householder.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Householder/Householder.h
@@ -39,6 +39,7 @@ template<int n> struct decrement_size
* MatrixBase::applyHouseholderOnTheRight()
*/
template<typename Derived>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
{
VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
@@ -62,6 +63,7 @@ void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
*/
template<typename Derived>
template<typename EssentialPart>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::makeHouseholder(
EssentialPart& essential,
Scalar& tau,
@@ -103,13 +105,14 @@ void MatrixBase<Derived>::makeHouseholder(
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
* \param workspace a pointer to working space with at least
- * this->cols() * essential.size() entries
+ * this->cols() entries
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(),
* MatrixBase::applyHouseholderOnTheRight()
*/
template<typename Derived>
template<typename EssentialPart>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::applyHouseholderOnTheLeft(
const EssentialPart& essential,
const Scalar& tau,
@@ -140,13 +143,14 @@ void MatrixBase<Derived>::applyHouseholderOnTheLeft(
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
* \param workspace a pointer to working space with at least
- * this->cols() * essential.size() entries
+ * this->rows() entries
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(),
* MatrixBase::applyHouseholderOnTheLeft()
*/
template<typename Derived>
template<typename EssentialPart>
+EIGEN_DEVICE_FUNC
void MatrixBase<Derived>::applyHouseholderOnTheRight(
const EssentialPart& essential,
const Scalar& tau,
@@ -160,10 +164,10 @@ void MatrixBase<Derived>::applyHouseholderOnTheRight(
{
Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows());
Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
- tmp.noalias() = right * essential.conjugate();
+ tmp.noalias() = right * essential;
tmp += this->col(0);
this->col(0) -= tau * tmp;
- right.noalias() -= tau * tmp * essential.transpose();
+ right.noalias() -= tau * tmp * essential.adjoint();
}
}
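
The applyHouseholderOnTheRight() change is a correctness fix for complex scalars: the transformation applied is now exactly M*H with H = I - tau*v*v^* and v = (1, essential), matching the adjoint-based definition used on the left. A small self-contained check against a dense reconstruction of H:

#include <Eigen/Dense>
#include <complex>
#include <iostream>

int main()
{
  typedef std::complex<double> C;
  Eigen::VectorXcd v = Eigen::VectorXcd::Random(5);
  Eigen::VectorXcd essential(4);
  C tau;
  double beta;
  v.makeHouseholder(essential, tau, beta);        // so that H*v = (beta, 0, ..., 0)

  Eigen::MatrixXcd M = Eigen::MatrixXcd::Random(5, 5), MH = M;
  Eigen::VectorXcd workspace(5);                  // >= rows() entries, as now documented
  MH.applyHouseholderOnTheRight(essential, tau, workspace.data());

  Eigen::VectorXcd full(5);
  full << C(1, 0), essential;                     // v = (1, essential)
  Eigen::MatrixXcd H =
      Eigen::MatrixXcd::Identity(5, 5) - tau * full * full.adjoint();
  std::cout << (MH - M * H).norm() << std::endl;  // ~0 with the corrected code
}
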
diff --git a/examples/ThirdPartyLibs/Eigen/src/Householder/HouseholderSequence.h b/examples/ThirdPartyLibs/Eigen/src/Householder/HouseholderSequence.h
index 3ce0a693d..022f6c3db 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Householder/HouseholderSequence.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Householder/HouseholderSequence.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H
#define EIGEN_HOUSEHOLDER_SEQUENCE_H
-namespace Eigen {
+namespace Eigen {
/** \ingroup Householder_Module
* \householder_module
@@ -34,8 +34,8 @@ namespace Eigen {
* form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i
* v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$
* v_i \f$ is a vector of the form
- * \f[
- * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
+ * \f[
+ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
* \f]
* The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector.
*
@@ -87,7 +87,7 @@ struct hseq_side_dependent_impl
{
typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
- static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
+ static EIGEN_DEVICE_FUNC inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
@@ -120,7 +120,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
: public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;
-
+
public:
enum {
RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
@@ -140,6 +140,28 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
Side
> ConjugateReturnType;
+ typedef HouseholderSequence<
+ VectorsType,
+ typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
+ CoeffsType>::type,
+ Side
+ > AdjointReturnType;
+
+ typedef HouseholderSequence<
+ typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
+ VectorsType>::type,
+ CoeffsType,
+ Side
+ > TransposeReturnType;
+
+ typedef HouseholderSequence<
+ typename internal::add_const<VectorsType>::type,
+ typename internal::add_const<CoeffsType>::type,
+ Side
+ > ConstHouseholderSequence;
+
/** \brief Constructor.
* \param[in] v %Matrix containing the essential parts of the Householder vectors
* \param[in] h Vector containing the Householder coefficients
@@ -157,33 +179,37 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa setLength(), setShift()
*/
+ EIGEN_DEVICE_FUNC
HouseholderSequence(const VectorsType& v, const CoeffsType& h)
- : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()),
+ : m_vectors(v), m_coeffs(h), m_reverse(false), m_length(v.diagonalSize()),
m_shift(0)
{
}
/** \brief Copy constructor. */
+ EIGEN_DEVICE_FUNC
HouseholderSequence(const HouseholderSequence& other)
: m_vectors(other.m_vectors),
m_coeffs(other.m_coeffs),
- m_trans(other.m_trans),
+ m_reverse(other.m_reverse),
m_length(other.m_length),
m_shift(other.m_shift)
{
}
/** \brief Number of rows of transformation viewed as a matrix.
- * \returns Number of rows
+ * \returns Number of rows
* \details This equals the dimension of the space that the transformation acts on.
*/
- Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
/** \brief Number of columns of transformation viewed as a matrix.
* \returns Number of columns
* \details This equals the dimension of the space that the transformation acts on.
*/
- Index cols() const { return rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return rows(); }
/** \brief Essential part of a Householder vector.
* \param[in] k Index of Householder reflection
@@ -191,14 +217,15 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of
* length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector
- * \f[
- * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
+ * \f[
+ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
* \f]
* The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v
* passed to the constructor.
*
* \sa setShift(), shift()
*/
+ EIGEN_DEVICE_FUNC
const EssentialVectorType essentialVector(Index k) const
{
eigen_assert(k >= 0 && k < m_length);
@@ -206,31 +233,51 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
}
/** \brief %Transpose of the Householder sequence. */
- HouseholderSequence transpose() const
+ TransposeReturnType transpose() const
{
- return HouseholderSequence(*this).setTrans(!m_trans);
+ return TransposeReturnType(m_vectors.conjugate(), m_coeffs)
+ .setReverseFlag(!m_reverse)
+ .setLength(m_length)
+ .setShift(m_shift);
}
/** \brief Complex conjugate of the Householder sequence. */
ConjugateReturnType conjugate() const
{
return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
- .setTrans(m_trans)
+ .setReverseFlag(m_reverse)
.setLength(m_length)
.setShift(m_shift);
}
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
+ * returns \c *this otherwise.
+ */
+ template<bool Cond>
+ EIGEN_DEVICE_FUNC
+ inline typename internal::conditional<Cond,ConjugateReturnType,ConstHouseholderSequence>::type
+ conjugateIf() const
+ {
+ typedef typename internal::conditional<Cond,ConjugateReturnType,ConstHouseholderSequence>::type ReturnType;
+ return ReturnType(m_vectors.template conjugateIf<Cond>(), m_coeffs.template conjugateIf<Cond>());
+ }
+
/** \brief Adjoint (conjugate transpose) of the Householder sequence. */
- ConjugateReturnType adjoint() const
+ AdjointReturnType adjoint() const
{
- return conjugate().setTrans(!m_trans);
+ return AdjointReturnType(m_vectors, m_coeffs.conjugate())
+ .setReverseFlag(!m_reverse)
+ .setLength(m_length)
+ .setShift(m_shift);
}
/** \brief Inverse of the Householder sequence (equals the adjoint). */
- ConjugateReturnType inverse() const { return adjoint(); }
+ AdjointReturnType inverse() const { return adjoint(); }
/** \internal */
- template<typename DestType> inline void evalTo(DestType& dst) const
+ template<typename DestType>
+ inline EIGEN_DEVICE_FUNC
+ void evalTo(DestType& dst) const
{
Matrix<Scalar, DestType::RowsAtCompileTime, 1,
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows());
@@ -239,6 +286,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
/** \internal */
template<typename Dest, typename Workspace>
+ EIGEN_DEVICE_FUNC
void evalTo(Dest& dst, Workspace& workspace) const
{
workspace.resize(rows());
@@ -251,7 +299,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
for(Index k = vecs-1; k >= 0; --k)
{
Index cornerSize = rows() - k - m_shift;
- if(m_trans)
+ if(m_reverse)
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());
else
@@ -265,18 +313,26 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
for(Index k = 0; k<cols()-vecs ; ++k)
dst.col(k).tail(rows()-k-1).setZero();
}
+ else if(m_length>BlockSize)
+ {
+ dst.setIdentity(rows(), rows());
+ if(m_reverse)
+ applyThisOnTheLeft(dst,workspace,true);
+ else
+ applyThisOnTheLeft(dst,workspace,true);
+ }
else
{
dst.setIdentity(rows(), rows());
for(Index k = vecs-1; k >= 0; --k)
{
Index cornerSize = rows() - k - m_shift;
- if(m_trans)
+ if(m_reverse)
dst.bottomRightCorner(cornerSize, cornerSize)
- .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
+ .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());
else
dst.bottomRightCorner(cornerSize, cornerSize)
- .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
+ .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data());
}
}
}
@@ -295,42 +351,52 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
workspace.resize(dst.rows());
for(Index k = 0; k < m_length; ++k)
{
- Index actual_k = m_trans ? m_length-k-1 : k;
+ Index actual_k = m_reverse ? m_length-k-1 : k;
dst.rightCols(rows()-m_shift-actual_k)
.applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
}
}
/** \internal */
- template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
+ template<typename Dest> inline void applyThisOnTheLeft(Dest& dst, bool inputIsIdentity = false) const
{
Matrix<Scalar,1,Dest::ColsAtCompileTime,RowMajor,1,Dest::MaxColsAtCompileTime> workspace;
- applyThisOnTheLeft(dst, workspace);
+ applyThisOnTheLeft(dst, workspace, inputIsIdentity);
}
/** \internal */
template<typename Dest, typename Workspace>
- inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const
+ inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace, bool inputIsIdentity = false) const
{
- const Index BlockSize = 48;
+ if(inputIsIdentity && m_reverse)
+ inputIsIdentity = false;
// if the entries are large enough, then apply the reflectors by block
if(m_length>=BlockSize && dst.cols()>1)
{
- for(Index i = 0; i < m_length; i+=BlockSize)
+ // Make sure we have at least 2 useful blocks, otherwise it is pointless:
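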
+ Index blockSize = m_length<Index(2*BlockSize) ? (m_length+1)/2 : Index(BlockSize);
+ for(Index i = 0; i < m_length; i+=blockSize)
{
- Index end = m_trans ? (std::min)(m_length,i+BlockSize) : m_length-i;
- Index k = m_trans ? i : (std::max)(Index(0),end-BlockSize);
+ Index end = m_reverse ? (std::min)(m_length,i+blockSize) : m_length-i;
+ Index k = m_reverse ? i : (std::max)(Index(0),end-blockSize);
Index bs = end-k;
Index start = k + m_shift;
-
+
typedef Block<typename internal::remove_all<VectorsType>::type,Dynamic,Dynamic> SubVectorsType;
SubVectorsType sub_vecs1(m_vectors.const_cast_derived(), Side==OnTheRight ? k : start,
Side==OnTheRight ? start : k,
Side==OnTheRight ? bs : m_vectors.rows()-start,
Side==OnTheRight ? m_vectors.cols()-start : bs);
typename internal::conditional<Side==OnTheRight, Transpose<SubVectorsType>, SubVectorsType&>::type sub_vecs(sub_vecs1);
- Block<Dest,Dynamic,Dynamic> sub_dst(dst,dst.rows()-rows()+m_shift+k,0, rows()-m_shift-k,dst.cols());
- apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_trans);
+
+ Index dstStart = dst.rows()-rows()+m_shift+k;
+ Index dstRows = rows()-m_shift-k;
+ Block<Dest,Dynamic,Dynamic> sub_dst(dst,
+ dstStart,
+ inputIsIdentity ? dstStart : 0,
+ dstRows,
+ inputIsIdentity ? dstRows : dst.cols());
+ apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_reverse);
}
}
else
@@ -338,8 +404,9 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
workspace.resize(dst.cols());
for(Index k = 0; k < m_length; ++k)
{
- Index actual_k = m_trans ? k : m_length-k-1;
- dst.bottomRows(rows()-m_shift-actual_k)
+ Index actual_k = m_reverse ? k : m_length-k-1;
+ Index dstStart = rows()-m_shift-actual_k;
+ dst.bottomRightCorner(dstStart, inputIsIdentity ? dstStart : dst.cols())
.applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
}
}
@@ -357,7 +424,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
{
typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type
res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>());
- applyThisOnTheLeft(res);
+ applyThisOnTheLeft(res, internal::is_identity<OtherDerived>::value && res.rows()==res.cols());
return res;
}
@@ -372,6 +439,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa length()
*/
+ EIGEN_DEVICE_FUNC
HouseholderSequence& setLength(Index length)
{
m_length = length;
@@ -389,13 +457,17 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
*
* \sa shift()
*/
+ EIGEN_DEVICE_FUNC
HouseholderSequence& setShift(Index shift)
{
m_shift = shift;
return *this;
}
+ EIGEN_DEVICE_FUNC
Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */
+
+ EIGEN_DEVICE_FUNC
Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */
/* Necessary for .adjoint() and .conjugate() */
@@ -403,27 +475,30 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
protected:
- /** \brief Sets the transpose flag.
- * \param [in] trans New value of the transpose flag.
+ /** \internal
+ * \brief Sets the reverse flag.
+ * \param [in] reverse New value of the reverse flag.
*
- * By default, the transpose flag is not set. If the transpose flag is set, then this object represents
- * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
+ * By default, the reverse flag is not set. If the reverse flag is set, then this object represents
+ * \f$ H^r = H_{n-1} \ldots H_1 H_0 \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
+ * \note For real valued HouseholderSequence this is equivalent to transposing \f$ H \f$.
*
- * \sa trans()
+ * \sa reverseFlag(), transpose(), adjoint()
*/
- HouseholderSequence& setTrans(bool trans)
+ HouseholderSequence& setReverseFlag(bool reverse)
{
- m_trans = trans;
+ m_reverse = reverse;
return *this;
}
- bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */
+ bool reverseFlag() const { return m_reverse; } /**< \internal \brief Returns the reverse flag. */
typename VectorsType::Nested m_vectors;
typename CoeffsType::Nested m_coeffs;
- bool m_trans;
+ bool m_reverse;
Index m_length;
Index m_shift;
+ enum { BlockSize = 48 };
};
/** \brief Computes the product of a matrix with a Householder sequence.
@@ -444,7 +519,7 @@ typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,Ot
}
/** \ingroup Householder_Module \householder_module
- * \brief Convenience function for constructing a Householder sequence.
+ * \brief Convenience function for constructing a Householder sequence.
* \returns A HouseholderSequence constructed from the specified arguments.
*/
template<typename VectorsType, typename CoeffsType>
@@ -454,7 +529,7 @@ HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsTyp
}
/** \ingroup Householder_Module \householder_module
- * \brief Convenience function for constructing a Householder sequence.
+ * \brief Convenience function for constructing a Householder sequence.
* \returns A HouseholderSequence constructed from the specified arguments.
* \details This function differs from householderSequence() in that the template argument \p OnTheSide of
* the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft.
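
From user code the new transpose()/adjoint()/conjugateIf() members surface through expressions on the sequence returned by the QR decompositions; converting a sequence to a dense matrix goes through evalTo(), which for long enough sequences now takes the blocked identity path added above. A typical round trip, as a sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);

  Eigen::MatrixXd Q = qr.householderQ();          // HouseholderSequence -> dense

  // adjoint() conjugates the coefficients and sets the reverse flag.
  Eigen::MatrixXd QtQ = qr.householderQ().adjoint() * Q;
  std::cout << (QtQ - Eigen::MatrixXd::Identity(6, 6)).norm() << std::endl;
}
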
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
index facdaf890..a117fc155 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_BASIC_PRECONDITIONERS_H
#define EIGEN_BASIC_PRECONDITIONERS_H
-namespace Eigen {
+namespace Eigen {
/** \ingroup IterativeLinearSolvers_Module
* \brief A preconditioner based on the diagonal entries
@@ -52,15 +52,15 @@ class DiagonalPreconditioner
compute(mat);
}
- Index rows() const { return m_invdiag.size(); }
- Index cols() const { return m_invdiag.size(); }
-
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_invdiag.size(); }
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_invdiag.size(); }
+
template<typename MatType>
DiagonalPreconditioner& analyzePattern(const MatType& )
{
return *this;
}
-
+
template<typename MatType>
DiagonalPreconditioner& factorize(const MatType& mat)
{
@@ -77,7 +77,7 @@ class DiagonalPreconditioner
m_isInitialized = true;
return *this;
}
-
+
template<typename MatType>
DiagonalPreconditioner& compute(const MatType& mat)
{
@@ -99,7 +99,7 @@ class DiagonalPreconditioner
&& "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived());
}
-
+
ComputationInfo info() { return Success; }
protected:
@@ -121,7 +121,7 @@ class DiagonalPreconditioner
* \implsparsesolverconcept
*
* The diagonal entries are pre-inverted and stored into a dense vector.
- *
+ *
* \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner
*/
template <typename _Scalar>
@@ -146,7 +146,7 @@ class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
{
return *this;
}
-
+
template<typename MatType>
LeastSquareDiagonalPreconditioner& factorize(const MatType& mat)
{
@@ -168,7 +168,7 @@ class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
{
for(Index j=0; j<mat.outerSize(); ++j)
{
- RealScalar sum = mat.innerVector(j).squaredNorm();
+ RealScalar sum = mat.col(j).squaredNorm();
if(sum>RealScalar(0))
m_invdiag(j) = RealScalar(1)/sum;
else
@@ -178,13 +178,13 @@ class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
Base::m_isInitialized = true;
return *this;
}
-
+
template<typename MatType>
LeastSquareDiagonalPreconditioner& compute(const MatType& mat)
{
return factorize(mat);
}
-
+
ComputationInfo info() { return Success; }
protected:
@@ -205,19 +205,19 @@ class IdentityPreconditioner
template<typename MatrixType>
explicit IdentityPreconditioner(const MatrixType& ) {}
-
+
template<typename MatrixType>
IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
-
+
template<typename MatrixType>
IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
template<typename MatrixType>
IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
-
+
template<typename Rhs>
inline const Rhs& solve(const Rhs& b) const { return b; }
-
+
ComputationInfo info() { return Success; }
};
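
Both preconditioners are consumed through the iterative solvers rather than used standalone. A minimal sketch with the diagonal (Jacobi) preconditioner on an SPD tridiagonal system (sizes and values illustrative):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 100;
  std::vector<Eigen::Triplet<double> > t;          // 1-D Laplacian stencil
  for (int i = 0; i < n; ++i) {
    t.push_back(Eigen::Triplet<double>(i, i, 2.0));
    if (i + 1 < n) {
      t.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
      t.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
    }
  }
  SpMat A(n, n);
  A.setFromTriplets(t.begin(), t.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<SpMat, Eigen::Lower | Eigen::Upper,
                           Eigen::DiagonalPreconditioner<double> > cg(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << cg.iterations() << " iterations, estimated error "
            << cg.error() << std::endl;
}
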
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index 454f46814..153acef65 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -191,32 +191,16 @@ public:
/** \internal */
template<typename Rhs,typename Dest>
- void _solve_with_guess_impl(const Rhs& b, Dest& x) const
+ void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
{
- bool failed = false;
- for(Index j=0; j<b.cols(); ++j)
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- typename Dest::ColXpr xj(x,j);
- if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
- failed = true;
- }
- m_info = failed ? NumericalIssue
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ bool ret = internal::bicgstab(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error);
+
+ m_info = (!ret) ? NumericalIssue
: m_error <= Base::m_tolerance ? Success
: NoConvergence;
- m_isInitialized = true;
- }
-
- /** \internal */
- using Base::_solve_impl;
- template<typename Rhs,typename Dest>
- void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
- {
- x.resize(this->rows(),b.cols());
- x.setZero();
- _solve_with_guess_impl(b,x);
}
protected:
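
After this refactor BiCGSTAB only provides the single-vector kernel; the loop over right-hand-side columns and the zero initial guess both move into IterativeSolverBase (see that hunk further down). User-facing behavior is unchanged, e.g. a warm-started solve:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 50;
  Eigen::MatrixXd D = Eigen::MatrixXd::Random(n, n);
  // Diagonally dominant, hence safely solvable; values are illustrative.
  SpMat A = (D + double(n) * Eigen::MatrixXd::Identity(n, n)).sparseView();
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::BiCGSTAB<SpMat> solver(A);
  Eigen::VectorXd x0 = Eigen::VectorXd::Zero(n);   // initial guess
  Eigen::VectorXd x = solver.solveWithGuess(b, x0);
  std::cout << solver.iterations() << " iterations, error "
            << solver.error() << std::endl;
}
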
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index 395daa8e4..5d8c6b433 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -50,7 +50,8 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
tol_error = 0;
return;
}
- RealScalar threshold = tol*tol*rhsNorm2;
+ const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
+ RealScalar threshold = numext::maxi(RealScalar(tol*tol*rhsNorm2),considerAsZero);
RealScalar residualNorm2 = residual.squaredNorm();
if (residualNorm2 < threshold)
{
@@ -58,7 +59,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
tol_error = sqrt(residualNorm2 / rhsNorm2);
return;
}
-
+
VectorType p(n);
p = precond.solve(residual); // initial search direction
@@ -194,7 +195,7 @@ public:
/** \internal */
template<typename Rhs,typename Dest>
- void _solve_with_guess_impl(const Rhs& b, Dest& x) const
+ void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
{
typedef typename Base::MatrixWrapper MatrixWrapper;
typedef typename Base::ActualMatrixType ActualMatrixType;
@@ -210,31 +211,14 @@ public:
RowMajorWrapper,
typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
>::type SelfAdjointWrapper;
+
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
- for(Index j=0; j<b.cols(); ++j)
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- typename Dest::ColXpr xj(x,j);
- RowMajorWrapper row_mat(matrix());
- internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
- }
-
- m_isInitialized = true;
+ RowMajorWrapper row_mat(matrix());
+ internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b, x, Base::m_preconditioner, m_iterations, m_error);
m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
}
-
- /** \internal */
- using Base::_solve_impl;
- template<typename Rhs,typename Dest>
- void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
- {
- x.setZero();
- _solve_with_guess_impl(b.derived(),x);
- }
protected:
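
The considerAsZero clamp keeps the stopping threshold tol^2*|b|^2 from underflowing to zero when the right-hand side is at denormal scale, so the residual test can still fire. A sketch with values contrived to trigger the underflow:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 10;
  SpMat A = Eigen::MatrixXd::Identity(n, n).sparseView();
  // ||b||^2 ~ 1e-319: tol^2 * ||b||^2 underflows to 0, so without the clamp
  // the threshold itself was 0 and the loop could only stop on an exactly
  // zero residual or on maxIterations.
  Eigen::VectorXd b = Eigen::VectorXd::Constant(n, 1e-160);
  Eigen::ConjugateGradient<SpMat, Eigen::Lower | Eigen::Upper> cg(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << cg.iterations() << " iterations, error " << cg.error() << "\n";
}
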
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
index e45c272b4..7803fd817 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
@@ -14,8 +14,8 @@
#include <vector>
#include <list>
-namespace Eigen {
-/**
+namespace Eigen {
+/**
* \brief Modified Incomplete Cholesky with dual threshold
*
* References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
@@ -41,28 +41,22 @@ namespace Eigen {
* the info() method, then you can either increase the initial shift, or better use another preconditioning technique.
*
*/
-template <typename Scalar, int _UpLo = Lower, typename _OrderingType =
-#ifndef EIGEN_MPL2_ONLY
-AMDOrdering<int>
-#else
-NaturalOrdering<int>
-#endif
->
+template <typename Scalar, int _UpLo = Lower, typename _OrderingType = AMDOrdering<int> >
class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >
{
protected:
typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base;
using Base::m_isInitialized;
public:
- typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
typedef _OrderingType OrderingType;
typedef typename OrderingType::PermutationType PermutationType;
- typedef typename PermutationType::StorageIndex StorageIndex;
+ typedef typename PermutationType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType;
typedef Matrix<Scalar,Dynamic,1> VectorSx;
typedef Matrix<RealScalar,Dynamic,1> VectorRx;
typedef Matrix<StorageIndex,Dynamic, 1> VectorIx;
- typedef std::vector<std::list<StorageIndex> > VectorList;
+ typedef std::vector<std::list<StorageIndex> > VectorList;
enum { UpLo = _UpLo };
enum {
ColsAtCompileTime = Dynamic,
@@ -76,22 +70,22 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
*
* \sa IncompleteCholesky(const MatrixType&)
*/
- IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {}
-
+ IncompleteCholesky() : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false) {}
+
/** Constructor computing the incomplete factorization for the given matrix \a matrix.
*/
template<typename MatrixType>
- IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false)
+ IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false)
{
compute(matrix);
}
-
+
/** \returns number of rows of the factored matrix */
- Index rows() const { return m_L.rows(); }
-
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_L.rows(); }
+
/** \returns number of columns of the factored matrix */
- Index cols() const { return m_L.cols(); }
-
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_L.cols(); }
+
/** \brief Reports whether previous computation was successful.
*
@@ -106,19 +100,19 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized.");
return m_info;
}
-
+
/** \brief Set the initial shift parameter \f$ \sigma \f$.
*/
void setInitialShift(RealScalar shift) { m_initialShift = shift; }
-
+
/** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat
*/
template<typename MatrixType>
void analyzePattern(const MatrixType& mat)
{
- OrderingType ord;
+ OrderingType ord;
PermutationType pinv;
- ord(mat.template selfadjointView<UpLo>(), pinv);
+ ord(mat.template selfadjointView<UpLo>(), pinv);
if(pinv.size()>0) m_perm = pinv.inverse();
else m_perm.resize(0);
m_L.resize(mat.rows(), mat.cols());
@@ -126,7 +120,7 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
m_isInitialized = true;
m_info = Success;
}
-
+
/** \brief Performs the numerical factorization of the input matrix \a mat
*
* The method analyzePattern() or compute() must have been called beforehand
@@ -136,7 +130,7 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
*/
template<typename MatrixType>
void factorize(const MatrixType& mat);
-
+
/** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat
*
* It is a shortcut for a sequential call to the analyzePattern() and factorize() methods.
@@ -149,7 +143,7 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
analyzePattern(mat);
factorize(mat);
}
-
+
// internal
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
@@ -176,16 +170,16 @@ class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_Up
protected:
FactorType m_L; // The lower part stored in CSC
- VectorRx m_scale; // The vector for scaling the matrix
+ VectorRx m_scale; // The vector for scaling the matrix
RealScalar m_initialShift; // The initial shift parameter
- bool m_analysisIsOk;
- bool m_factorizationIsOk;
+ bool m_analysisIsOk;
+ bool m_factorizationIsOk;
ComputationInfo m_info;
- PermutationType m_perm;
+ PermutationType m_perm;
private:
- inline void updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol);
-};
+ inline void updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol);
+};
// Based on the following paper:
// C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
@@ -196,10 +190,10 @@ template<typename _MatrixType>
void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
{
using std::sqrt;
- eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
-
+ eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
+
// Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
-
+
// Apply the fill-reducing permutation computed in analyzePattern()
if (m_perm.rows() == mat.rows() ) // To detect the null permutation
{
@@ -212,8 +206,8 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
{
m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
}
-
- Index n = m_L.cols();
+
+ Index n = m_L.cols();
Index nnz = m_L.nonZeros();
Map<VectorSx> vals(m_L.valuePtr(), nnz); //values
Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices
@@ -225,9 +219,9 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
VectorIx col_pattern(n);
col_pattern.fill(-1);
StorageIndex col_nnz;
-
-
- // Computes the scaling factors
+
+
+ // Computes the scaling factors
m_scale.resize(n);
m_scale.setZero();
for (Index j = 0; j < n; j++)
@@ -237,7 +231,7 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
if(rowIdx[k]!=j)
m_scale(rowIdx[k]) += numext::abs2(vals(k));
}
-
+
m_scale = m_scale.cwiseSqrt().cwiseSqrt();
for (Index j = 0; j < n; ++j)
@@ -247,8 +241,8 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
m_scale(j) = 1;
// TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster)
-
- // Scale and compute the shift for the matrix
+
+ // Scale and compute the shift for the matrix
RealScalar mindiag = NumTraits<RealScalar>::highest();
for (Index j = 0; j < n; j++)
{
@@ -259,7 +253,7 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
}
FactorType L_save = m_L;
-
+
RealScalar shift = 0;
if(mindiag <= RealScalar(0.))
shift = m_initialShift - mindiag;
@@ -381,7 +375,7 @@ inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const
if (jk < colPtr(col+1) )
{
Index p = colPtr(col+1) - jk;
- Index minpos;
+ Index minpos;
rowIdx.segment(jk,p).minCoeff(&minpos);
minpos += jk;
if (rowIdx(minpos) != rowIdx(jk))
@@ -395,6 +389,6 @@ inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const
}
}
-} // end namespace Eigen
+} // end namespace Eigen
#endif
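
With the EIGEN_MPL2_ONLY branch removed, the ordering parameter now defaults to AMDOrdering unconditionally. IncompleteCholesky is normally plugged into ConjugateGradient; a sketch (matrix values illustrative, the shift is the sigma described above):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 200;
  std::vector<Eigen::Triplet<double> > t;
  for (int i = 0; i < n; ++i) {
    t.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i + 1 < n) {
      t.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
      t.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
    }
  }
  SpMat A(n, n);
  A.setFromTriplets(t.begin(), t.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<SpMat, Eigen::Lower,
                           Eigen::IncompleteCholesky<double> > cg;
  cg.preconditioner().setInitialShift(1e-3);
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << cg.iterations() << " iterations" << std::endl;
}
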
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index 338e6f10a..cdcf709eb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -12,19 +12,19 @@
#define EIGEN_INCOMPLETE_LUT_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
-
+
/** \internal
- * Compute a quick-sort split of a vector
+ * Compute a quick-sort split of a vector
* On output, the vector row is permuted such that its elements satisfy
* abs(row(i)) >= abs(row(ncut)) if i<ncut
- * abs(row(i)) <= abs(row(ncut)) if i>ncut
+ * abs(row(i)) <= abs(row(ncut)) if i>ncut
* \param row The vector of values
* \param ind The array of index for the elements in @p row
* \param ncut The number of largest elements to keep
- **/
+ **/
template <typename VectorV, typename VectorI>
Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
{
@@ -34,15 +34,15 @@ Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
Index mid;
Index n = row.size(); /* length of the vector */
Index first, last ;
-
+
ncut--; /* to fit the zero-based indices */
- first = 0;
- last = n-1;
+ first = 0;
+ last = n-1;
if (ncut < first || ncut > last ) return 0;
-
+
do {
- mid = first;
- RealScalar abskey = abs(row(mid));
+ mid = first;
+ RealScalar abskey = abs(row(mid));
for (Index j = first + 1; j <= last; j++) {
if ( abs(row(j)) > abskey) {
++mid;
@@ -53,12 +53,12 @@ Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
/* Interchange for the pivot element */
swap(row(mid), row(first));
swap(ind(mid), ind(first));
-
+
if (mid > ncut) last = mid - 1;
- else if (mid < ncut ) first = mid + 1;
+ else if (mid < ncut ) first = mid + 1;
} while (mid != ncut );
-
- return 0; /* mid is equal to ncut */
+
+ return 0; /* mid is equal to ncut */
}
}// end namespace internal
@@ -71,23 +71,23 @@ Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
*
* During the numerical factorization, two dropping rules are used :
* 1) any element whose magnitude is less than some tolerance is dropped.
- * This tolerance is obtained by multiplying the input tolerance @p droptol
+ * This tolerance is obtained by multiplying the input tolerance @p droptol
* by the average magnitude of all the original elements in the current row.
- * 2) After the elimination of the row, only the @p fill largest elements in
- * the L part and the @p fill largest elements in the U part are kept
- * (in addition to the diagonal element ). Note that @p fill is computed from
- * the input parameter @p fillfactor which is used the ratio to control the fill_in
+ * 2) After the elimination of the row, only the @p fill largest elements in
+ * the L part and the @p fill largest elements in the U part are kept
+ * (in addition to the diagonal element ). Note that @p fill is computed from
+ * the input parameter @p fillfactor which is used as the ratio to control the fill_in
+ * relative to the initial number of nonzero elements.
- *
+ *
* The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
- * and when @p fill=n/2 with @p droptol being different to zero.
- *
- * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
+ * and when @p fill=n/2 with @p droptol being different to zero.
+ *
+ * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
* Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
- *
+ *
* NOTE : The following implementation is derived from the ILUT implementation
- * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
- * released under the terms of the GNU LGPL:
+ * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
+ * released under the terms of the GNU LGPL:
* http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
* However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
* See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
@@ -115,28 +115,28 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageInd
};
public:
-
+
IncompleteLUT()
: m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
m_analysisIsOk(false), m_factorizationIsOk(false)
{}
-
+
template<typename MatrixType>
explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
: m_droptol(droptol),m_fillfactor(fillfactor),
m_analysisIsOk(false),m_factorizationIsOk(false)
{
eigen_assert(fillfactor != 0);
- compute(mat);
+ compute(mat);
}
-
- Index rows() const { return m_lu.rows(); }
-
- Index cols() const { return m_lu.cols(); }
+
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_lu.rows(); }
+
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_lu.cols(); }
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
@@ -144,36 +144,36 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageInd
eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
return m_info;
}
-
+
template<typename MatrixType>
void analyzePattern(const MatrixType& amat);
-
+
template<typename MatrixType>
void factorize(const MatrixType& amat);
-
+
/**
* Compute an incomplete LU factorization with dual threshold on the matrix mat
* No pivoting is done in this version
- *
+ *
**/
template<typename MatrixType>
IncompleteLUT& compute(const MatrixType& amat)
{
- analyzePattern(amat);
+ analyzePattern(amat);
factorize(amat);
return *this;
}
- void setDroptol(const RealScalar& droptol);
- void setFillfactor(int fillfactor);
-
+ void setDroptol(const RealScalar& droptol);
+ void setFillfactor(int fillfactor);
+
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_Pinv * b;
x = m_lu.template triangularView<UnitLower>().solve(x);
x = m_lu.template triangularView<Upper>().solve(x);
- x = m_P * x;
+ x = m_P * x;
}
protected:
@@ -200,22 +200,22 @@ protected:
/**
* Set control parameter droptol
- * \param droptol Drop any element whose magnitude is less than this tolerance
- **/
+ * \param droptol Drop any element whose magnitude is less than this tolerance
+ **/
template<typename Scalar, typename StorageIndex>
void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol)
{
- this->m_droptol = droptol;
+ this->m_droptol = droptol;
}
/**
* Set control parameter fillfactor
- * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
- **/
+ * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
+ **/
template<typename Scalar, typename StorageIndex>
void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor)
{
- this->m_fillfactor = fillfactor;
+ this->m_fillfactor = fillfactor;
}
template <typename Scalar, typename StorageIndex>
@@ -225,24 +225,15 @@ void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
// Compute the Fill-reducing permutation
// Since ILUT does not perform any numerical pivoting,
// it is highly preferable to keep the diagonal through symmetric permutations.
-#ifndef EIGEN_MPL2_ONLY
// To this end, let's symmetrize the pattern and perform AMD on it.
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
- // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
+ // on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AMDOrdering<StorageIndex> ordering;
ordering(AtA,m_P);
m_Pinv = m_P.inverse(); // cache the inverse permutation
-#else
- // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.
- SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
- COLAMDOrdering<StorageIndex> ordering;
- ordering(mat1,m_Pinv);
- m_P = m_Pinv.inverse();
-#endif
-
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_isInitialized = true;
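
ILUT pairs with the nonsymmetric solvers; the two dropping rules documented above are controlled by setDroptol() and setFillfactor(). A sketch with BiCGSTAB (matrix values illustrative):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 80;
  Eigen::MatrixXd D = Eigen::MatrixXd::Random(n, n);
  SpMat A = (D + double(n) * Eigen::MatrixXd::Identity(n, n)).sparseView();
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::BiCGSTAB<SpMat, Eigen::IncompleteLUT<double> > solver;
  solver.preconditioner().setDroptol(1e-4);   // rule 1: magnitude threshold
  solver.preconditioner().setFillfactor(10);  // rule 2: per-row fill budget
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);
  std::cout << solver.iterations() << " iterations" << std::endl;
}
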
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index 7c2326eb7..28a0c5109 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
#define EIGEN_ITERATIVE_SOLVER_BASE_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -145,7 +145,7 @@ class IterativeSolverBase : public SparseSolverBase<Derived>
protected:
typedef SparseSolverBase<Derived> Base;
using Base::m_isInitialized;
-
+
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
@@ -169,10 +169,10 @@ public:
}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
+ *
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
- *
+ *
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
@@ -187,7 +187,7 @@ public:
}
~IterativeSolverBase() {}
-
+
/** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
@@ -203,7 +203,7 @@ public:
m_info = m_preconditioner.info();
return derived();
}
-
+
/** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly calls factorize on the preconditioner.
@@ -216,7 +216,7 @@ public:
template<typename MatrixDerived>
Derived& factorize(const EigenBase<MatrixDerived>& A)
{
- eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
grab(A.derived());
m_preconditioner.factorize(matrix());
m_factorizationIsOk = true;
@@ -247,16 +247,16 @@ public:
}
/** \internal */
- Index rows() const { return matrix().rows(); }
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return matrix().rows(); }
/** \internal */
- Index cols() const { return matrix().cols(); }
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return matrix().cols(); }
/** \returns the tolerance threshold used by the stopping criteria.
* \sa setTolerance()
*/
RealScalar tolerance() const { return m_tolerance; }
-
+
/** Sets the tolerance threshold used by the stopping criteria.
*
* This value is used as an upper bound to the relative residual error: |Ax-b|/|b|.
@@ -270,19 +270,19 @@ public:
/** \returns a read-write reference to the preconditioner for custom configuration. */
Preconditioner& preconditioner() { return m_preconditioner; }
-
+
/** \returns a read-only reference to the preconditioner. */
const Preconditioner& preconditioner() const { return m_preconditioner; }
/** \returns the max number of iterations.
- * It is either the value setted by setMaxIterations or, by default,
+ * It is either the value set by setMaxIterations or, by default,
* twice the number of columns of the matrix.
*/
Index maxIterations() const
{
return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations;
}
-
+
/** Sets the max number of iterations.
* Default is twice the number of columns of the matrix.
*/
@@ -328,13 +328,13 @@ public:
eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
return m_info;
}
-
+
/** \internal */
template<typename Rhs, typename DestDerived>
- void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const
+ void _solve_with_guess_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const
{
eigen_assert(rows()==b.rows());
-
+
Index rhsCols = b.cols();
Index size = b.rows();
DestDerived& dest(aDest.derived());
@@ -344,15 +344,65 @@ public:
// We do not directly fill dest because sparse expressions have to be free of aliasing issues.
// For non-square least-squares problems, b and dest might not have the same size, whereas they might alias each other.
typename DestDerived::PlainObject tmp(cols(),rhsCols);
+ ComputationInfo global_info = Success;
for(Index k=0; k<rhsCols; ++k)
{
tb = b.col(k);
- tx = derived().solve(tb);
+ tx = dest.col(k);
+ derived()._solve_vector_with_guess_impl(tb,tx);
tmp.col(k) = tx.sparseView(0);
+
+ // The call to _solve_vector_with_guess_impl updates m_info, so if it failed for a previous column
+ // we need to restore it to the worst value.
+ if(m_info==NumericalIssue)
+ global_info = NumericalIssue;
+ else if(m_info==NoConvergence)
+ global_info = NoConvergence;
}
+ m_info = global_info;
dest.swap(tmp);
}
+ template<typename Rhs, typename DestDerived>
+ typename internal::enable_if<Rhs::ColsAtCompileTime!=1 && DestDerived::ColsAtCompileTime!=1>::type
+ _solve_with_guess_impl(const Rhs& b, MatrixBase<DestDerived> &aDest) const
+ {
+ eigen_assert(rows()==b.rows());
+
+ Index rhsCols = b.cols();
+ DestDerived& dest(aDest.derived());
+ ComputationInfo global_info = Success;
+ for(Index k=0; k<rhsCols; ++k)
+ {
+ typename DestDerived::ColXpr xk(dest,k);
+ typename Rhs::ConstColXpr bk(b,k);
+ derived()._solve_vector_with_guess_impl(bk,xk);
+
+      // The call to _solve_vector_with_guess_impl updates m_info, so if it failed for a previous column
+ // we need to restore it to the worst value.
+ if(m_info==NumericalIssue)
+ global_info = NumericalIssue;
+ else if(m_info==NoConvergence)
+ global_info = NoConvergence;
+ }
+ m_info = global_info;
+ }
+
+ template<typename Rhs, typename DestDerived>
+ typename internal::enable_if<Rhs::ColsAtCompileTime==1 || DestDerived::ColsAtCompileTime==1>::type
+ _solve_with_guess_impl(const Rhs& b, MatrixBase<DestDerived> &dest) const
+ {
+ derived()._solve_vector_with_guess_impl(b,dest.derived());
+ }
+
+ /** \internal default initial guess = 0 */
+ template<typename Rhs,typename Dest>
+ void _solve_impl(const Rhs& b, Dest& x) const
+ {
+ x.setZero();
+ derived()._solve_with_guess_impl(b,x);
+ }
+
protected:
void init()
{
@@ -370,19 +420,19 @@ protected:
{
return m_matrixWrapper.matrix();
}
-
+
template<typename InputType>
void grab(const InputType &A)
{
m_matrixWrapper.grab(A);
}
-
+
MatrixWrapper m_matrixWrapper;
Preconditioner m_preconditioner;
Index m_maxIterations;
RealScalar m_tolerance;
-
+
mutable RealScalar m_error;
mutable Index m_iterations;
mutable ComputationInfo m_info;
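
With this refactoring every solve funnels through _solve_vector_with_guess_impl one right-hand-side column at a time, the worst ComputationInfo seen across columns is reported, and the plain _solve_impl simply starts from a zero guess. A hedged sketch of both entry points on a toy tridiagonal SPD system (the data is illustrative, not from the patch):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <vector>

    int main() {
      const int n = 100;
      std::vector<Eigen::Triplet<double> > t;
      for (int i = 0; i < n; ++i) {
        t.push_back(Eigen::Triplet<double>(i, i, 2.0));
        if (i + 1 < n) {
          t.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
          t.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
        }
      }
      Eigen::SparseMatrix<double> A(n, n);
      A.setFromTriplets(t.begin(), t.end());
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg(A);
      Eigen::VectorXd x  = cg.solve(b);              // zero initial guess (_solve_impl)
      Eigen::VectorXd x2 = cg.solveWithGuess(b, x);  // warm start (_solve_with_guess_impl)
      return cg.info() == Eigen::Success ? 0 : 1;
    }
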
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
index 0aea0e099..203fd0ec6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
@@ -182,32 +182,14 @@ public:
/** \internal */
template<typename Rhs,typename Dest>
- void _solve_with_guess_impl(const Rhs& b, Dest& x) const
+ void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
- for(Index j=0; j<b.cols(); ++j)
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- typename Dest::ColXpr xj(x,j);
- internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
- }
-
- m_isInitialized = true;
+ internal::least_square_conjugate_gradient(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error);
m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
}
-
- /** \internal */
- using Base::_solve_impl;
- template<typename Rhs,typename Dest>
- void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
- {
- x.setZero();
- _solve_with_guess_impl(b.derived(),x);
- }
};
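
The per-column loop deleted here now lives in IterativeSolverBase, so this solver only has to handle a single right-hand-side vector. Calling code is unaffected; a small sketch with made-up data:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    int main() {
      // Overdetermined 6x3 system: minimize |Ax - b| in the least-squares sense.
      Eigen::SparseMatrix<double> A(6, 3);
      A.insert(0, 0) = 1; A.insert(1, 1) = 2; A.insert(2, 2) = 3;
      A.insert(3, 0) = 1; A.insert(4, 1) = 1; A.insert(5, 2) = 1;
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::LinSpaced(6, 1.0, 6.0);

      Eigen::LeastSquaresConjugateGradient<Eigen::SparseMatrix<double> > lscg(A);
      Eigen::VectorXd x = lscg.solve(b);  // the base class loops over columns of b
      return lscg.info() == Eigen::Success ? 0 : 1;
    }
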
diff --git a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
index 0ace45177..7b8965754 100644
--- a/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
+++ b/examples/ThirdPartyLibs/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
@@ -13,7 +13,7 @@
namespace Eigen {
template<typename Decomposition, typename RhsType, typename GuessType> class SolveWithGuess;
-
+
/** \class SolveWithGuess
* \ingroup IterativeLinearSolvers_Module
*
@@ -45,13 +45,15 @@ public:
typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;
typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;
typedef typename internal::ref_selector<SolveWithGuess>::type Nested;
-
+
SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess)
: m_dec(dec), m_rhs(rhs), m_guess(guess)
{}
-
- EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
- EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index rows() const EIGEN_NOEXCEPT { return m_dec.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }
EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; }
@@ -61,7 +63,7 @@ protected:
const Decomposition &m_dec;
const RhsType &m_rhs;
const GuessType &m_guess;
-
+
private:
Scalar coeff(Index row, Index col) const;
Scalar coeff(Index i) const;
@@ -85,8 +87,8 @@ struct evaluator<SolveWithGuess<Decomposition,RhsType, GuessType> >
m_result = solve.guess();
solve.dec()._solve_with_guess_impl(solve.rhs(), m_result);
}
-
-protected:
+
+protected:
PlainObject m_result;
};
@@ -108,7 +110,7 @@ struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, interna
}
};
-} // end namepsace internal
+} // end namespace internal
} // end namespace Eigen
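
SolveWithGuess is the lazy expression returned by solveWithGuess(); its evaluator first copies the guess into the result and only then runs _solve_with_guess_impl in place, which is also why rows() reports m_dec.cols(). A minimal sketch of how the expression is consumed (solver and data are placeholders):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    int main() {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 4.0; A.insert(1, 1) = 3.0;
      Eigen::VectorXd b(2);  b << 1.0, 2.0;
      Eigen::VectorXd x0 = Eigen::VectorXd::Zero(2);

      Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver(A);
      // The assignment below triggers the evaluator: x is seeded with x0,
      // then refined in place by the iterative solver.
      Eigen::VectorXd x = solver.solveWithGuess(b, x0);
      return 0;
    }
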
diff --git a/examples/ThirdPartyLibs/Eigen/src/Jacobi/Jacobi.h b/examples/ThirdPartyLibs/Eigen/src/Jacobi/Jacobi.h
index af1228cb8..76668a574 100644
--- a/examples/ThirdPartyLibs/Eigen/src/Jacobi/Jacobi.h
+++ b/examples/ThirdPartyLibs/Eigen/src/Jacobi/Jacobi.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_JACOBI_H
#define EIGEN_JACOBI_H
-namespace Eigen {
+namespace Eigen {
/** \ingroup Jacobi_Module
* \jacobi_module
@@ -73,13 +73,13 @@ template<typename Scalar> class JacobiRotation
bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
EIGEN_DEVICE_FUNC
- void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* r=0);
protected:
EIGEN_DEVICE_FUNC
- void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type);
EIGEN_DEVICE_FUNC
- void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type);
Scalar m_c, m_s;
};
@@ -90,11 +90,12 @@ template<typename Scalar> class JacobiRotation
* \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
+EIGEN_DEVICE_FUNC
bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
{
using std::sqrt;
using std::abs;
- typedef typename NumTraits<Scalar>::Real RealScalar;
+
RealScalar deno = RealScalar(2)*abs(y);
if(deno < (std::numeric_limits<RealScalar>::min)())
{
@@ -134,6 +135,7 @@ bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, co
*/
template<typename Scalar>
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Index p, Index q)
{
return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
@@ -143,7 +145,7 @@ inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Ind
* \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
* \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
*
- * The value of \a z is returned if \a z is not null (the default is null).
+ * The value of \a r is returned if \a r is not null (the default is null).
* Also note that G is built such that the cosine is always real.
*
* Example: \include Jacobi_makeGivens.cpp
@@ -156,20 +158,22 @@ inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Ind
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
-void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
+EIGEN_DEVICE_FUNC
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r)
{
- makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
+ makeGivens(p, q, r, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
}
// specialization for complexes
template<typename Scalar>
+EIGEN_DEVICE_FUNC
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
{
using std::sqrt;
using std::abs;
using numext::conj;
-
+
if(q==Scalar(0))
{
m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);
@@ -223,6 +227,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
// specialization for reals
template<typename Scalar>
+EIGEN_DEVICE_FUNC
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
{
using std::sqrt;
@@ -268,7 +273,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
namespace internal {
/** \jacobi_module
- * Applies the clock wise 2D rotation \a j to the set of 2D vectors of cordinates \a x and \a y:
+  * Applies the clockwise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
* \f$ \left ( \begin{array}{cc} x \\ y \end{array} \right ) = J \left ( \begin{array}{cc} x \\ y \end{array} \right ) \f$
*
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
@@ -286,6 +291,7 @@ void apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>&
*/
template<typename Derived>
template<typename OtherScalar>
+EIGEN_DEVICE_FUNC
inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
{
RowXpr x(this->row(p));
@@ -301,6 +307,7 @@ inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRo
*/
template<typename Derived>
template<typename OtherScalar>
+EIGEN_DEVICE_FUNC
inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
{
ColXpr x(this->col(p));
@@ -314,7 +321,8 @@ template<typename Scalar, typename OtherScalar,
int SizeAtCompileTime, int MinAlignment, bool Vectorizable>
struct apply_rotation_in_the_plane_selector
{
- static inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s)
+ static EIGEN_DEVICE_FUNC
+ inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s)
{
for(Index i=0; i<size; ++i)
{
@@ -441,10 +449,11 @@ struct apply_rotation_in_the_plane_selector<Scalar,OtherScalar,SizeAtCompileTime
};
template<typename VectorX, typename VectorY, typename OtherScalar>
+EIGEN_DEVICE_FUNC
void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j)
{
typedef typename VectorX::Scalar Scalar;
- const bool Vectorizable = (VectorX::Flags & VectorY::Flags & PacketAccessBit)
+ const bool Vectorizable = (int(VectorX::Flags) & int(VectorY::Flags) & PacketAccessBit)
&& (int(packet_traits<Scalar>::size) == int(packet_traits<OtherScalar>::size));
eigen_assert(xpr_x.size() == xpr_y.size());
@@ -454,7 +463,7 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x
Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);
Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);
-
+
OtherScalar c = j.c();
OtherScalar s = j.s();
if (c==OtherScalar(1) && s==OtherScalar(0))
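
The rename from \a z to \a r matches what makeGivens actually computes: a rotation G such that G^* (p,q)^T = (r,0)^T. A short sketch along the lines of the documented Jacobi_makeGivens example:

    #include <Eigen/Core>
    #include <Eigen/Jacobi>
    #include <iostream>

    int main() {
      Eigen::Vector2d v(3.0, 4.0);
      Eigen::JacobiRotation<double> G;
      double r;
      G.makeGivens(v.x(), v.y(), &r);       // here r == 5, the length of v
      v.applyOnTheLeft(0, 1, G.adjoint());  // v becomes (r, 0)
      std::cout << v.transpose() << "\n";
      return 0;
    }
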
diff --git a/examples/ThirdPartyLibs/Eigen/src/KLUSupport/KLUSupport.h b/examples/ThirdPartyLibs/Eigen/src/KLUSupport/KLUSupport.h
new file mode 100644
index 000000000..215db35b0
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/KLUSupport/KLUSupport.h
@@ -0,0 +1,358 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Kyle Macfarlan <kyle.macfarlan@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_KLUSUPPORT_H
+#define EIGEN_KLUSUPPORT_H
+
+namespace Eigen {
+
+/* TODO extract L, extract U, compute det, etc... */
+
+/** \ingroup KLUSupport_Module
+ * \brief A sparse LU factorization and solver based on KLU
+ *
+  * This class allows solving A.X = B sparse linear problems via an LU factorization
+  * using the KLU library. The sparse matrix A must be square and of full rank.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \warning The input matrix A should be in a \b compressed and \b column-major form.
+ * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
+  * \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ *
+ * \implsparsesolverconcept
+ *
+ * \sa \ref TutorialSparseSolverConcept, class UmfPackLU, class SparseLU
+ */
+
+
+inline int klu_solve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, double B [ ], klu_common *Common, double) {
+ return klu_solve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), B, Common);
+}
+
+inline int klu_solve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, std::complex<double> B[], klu_common *Common, std::complex<double>) {
+ return klu_z_solve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), &numext::real_ref(B[0]), Common);
+}
+
+inline int klu_tsolve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, double B[], klu_common *Common, double) {
+ return klu_tsolve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), B, Common);
+}
+
+inline int klu_tsolve(klu_symbolic *Symbolic, klu_numeric *Numeric, Index ldim, Index nrhs, std::complex<double> B[], klu_common *Common, std::complex<double>) {
+ return klu_z_tsolve(Symbolic, Numeric, internal::convert_index<int>(ldim), internal::convert_index<int>(nrhs), &numext::real_ref(B[0]), 0, Common);
+}
+
+inline klu_numeric* klu_factor(int Ap [ ], int Ai [ ], double Ax [ ], klu_symbolic *Symbolic, klu_common *Common, double) {
+ return klu_factor(Ap, Ai, Ax, Symbolic, Common);
+}
+
+inline klu_numeric* klu_factor(int Ap[], int Ai[], std::complex<double> Ax[], klu_symbolic *Symbolic, klu_common *Common, std::complex<double>) {
+ return klu_z_factor(Ap, Ai, &numext::real_ref(Ax[0]), Symbolic, Common);
+}
+
+
+template<typename _MatrixType>
+class KLU : public SparseSolverBase<KLU<_MatrixType> >
+{
+ protected:
+ typedef SparseSolverBase<KLU<_MatrixType> > Base;
+ using Base::m_isInitialized;
+ public:
+ using Base::_solve_impl;
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::StorageIndex StorageIndex;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar> LUMatrixType;
+ typedef SparseMatrix<Scalar,ColMajor,int> KLUMatrixType;
+ typedef Ref<const KLUMatrixType, StandardCompressedFormat> KLUMatrixRef;
+ enum {
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ public:
+
+ KLU()
+ : m_dummy(0,0), mp_matrix(m_dummy)
+ {
+ init();
+ }
+
+ template<typename InputMatrixType>
+ explicit KLU(const InputMatrixType& matrix)
+ : mp_matrix(matrix)
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~KLU()
+ {
+ if(m_symbolic) klu_free_symbolic(&m_symbolic,&m_common);
+ if(m_numeric) klu_free_numeric(&m_numeric,&m_common);
+ }
+
+ EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return mp_matrix.rows(); }
+ EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return mp_matrix.cols(); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be singular.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+#if 0 // not implemented yet
+ inline const LUMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ inline const LUMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+#endif
+    /** Computes the sparse LU decomposition of \a matrix
+ * Note that the matrix should be column-major, and in compressed format for best performance.
+ * \sa SparseMatrix::makeCompressed().
+ */
+ template<typename InputMatrixType>
+ void compute(const InputMatrixType& matrix)
+ {
+ if(m_symbolic) klu_free_symbolic(&m_symbolic, &m_common);
+ if(m_numeric) klu_free_numeric(&m_numeric, &m_common);
+ grab(matrix.derived());
+ analyzePattern_impl();
+ factorize_impl();
+ }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving for several problems having the same structure.
+ *
+ * \sa factorize(), compute()
+ */
+ template<typename InputMatrixType>
+ void analyzePattern(const InputMatrixType& matrix)
+ {
+ if(m_symbolic) klu_free_symbolic(&m_symbolic, &m_common);
+ if(m_numeric) klu_free_numeric(&m_numeric, &m_common);
+
+ grab(matrix.derived());
+
+ analyzePattern_impl();
+ }
+
+
+ /** Provides access to the control settings array used by KLU.
+ *
+ * See KLU documentation for details.
+ */
+ inline const klu_common& kluCommon() const
+ {
+ return m_common;
+ }
+
+    /** Provides access to the control settings array used by KLU.
+ *
+ * If this array contains NaN's, the default values are used.
+ *
+ * See KLU documentation for details.
+ */
+ inline klu_common& kluCommon()
+ {
+ return m_common;
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+      * The given matrix must have the same sparsity as the matrix on which the pattern analysis has been performed.
+ *
+ * \sa analyzePattern(), compute()
+ */
+ template<typename InputMatrixType>
+ void factorize(const InputMatrixType& matrix)
+ {
+ eigen_assert(m_analysisIsOk && "KLU: you must first call analyzePattern()");
+ if(m_numeric)
+ klu_free_numeric(&m_numeric,&m_common);
+
+ grab(matrix.derived());
+
+ factorize_impl();
+ }
+
+ /** \internal */
+ template<typename BDerived,typename XDerived>
+ bool _solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;
+
+#if 0 // not implemented yet
+ Scalar determinant() const;
+
+ void extractData() const;
+#endif
+
+ protected:
+
+ void init()
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_numeric = 0;
+ m_symbolic = 0;
+ m_extractedDataAreDirty = true;
+
+ klu_defaults(&m_common);
+ }
+
+ void analyzePattern_impl()
+ {
+ m_info = InvalidInput;
+ m_analysisIsOk = false;
+ m_factorizationIsOk = false;
+ m_symbolic = klu_analyze(internal::convert_index<int>(mp_matrix.rows()),
+ const_cast<StorageIndex*>(mp_matrix.outerIndexPtr()), const_cast<StorageIndex*>(mp_matrix.innerIndexPtr()),
+ &m_common);
+ if (m_symbolic) {
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_extractedDataAreDirty = true;
+ }
+ }
+
+ void factorize_impl()
+ {
+
+ m_numeric = klu_factor(const_cast<StorageIndex*>(mp_matrix.outerIndexPtr()), const_cast<StorageIndex*>(mp_matrix.innerIndexPtr()), const_cast<Scalar*>(mp_matrix.valuePtr()),
+ m_symbolic, &m_common, Scalar());
+
+
+ m_info = m_numeric ? Success : NumericalIssue;
+ m_factorizationIsOk = m_numeric ? 1 : 0;
+ m_extractedDataAreDirty = true;
+ }
+
+ template<typename MatrixDerived>
+ void grab(const EigenBase<MatrixDerived> &A)
+ {
+ mp_matrix.~KLUMatrixRef();
+ ::new (&mp_matrix) KLUMatrixRef(A.derived());
+ }
+
+ void grab(const KLUMatrixRef &A)
+ {
+ if(&(A.derived()) != &mp_matrix)
+ {
+ mp_matrix.~KLUMatrixRef();
+ ::new (&mp_matrix) KLUMatrixRef(A);
+ }
+ }
+
+ // cached data to reduce reallocation, etc.
+#if 0 // not implemented yet
+ mutable LUMatrixType m_l;
+ mutable LUMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+#endif
+
+ KLUMatrixType m_dummy;
+ KLUMatrixRef mp_matrix;
+
+ klu_numeric* m_numeric;
+ klu_symbolic* m_symbolic;
+ klu_common m_common;
+ mutable ComputationInfo m_info;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+ mutable bool m_extractedDataAreDirty;
+
+ private:
+ KLU(const KLU& ) { }
+};
+
+#if 0 // not implemented yet
+template<typename MatrixType>
+void KLU<MatrixType>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ eigen_assert(false && "KLU: extractData Not Yet Implemented");
+
+ // get size of the data
+ int lnz, unz, rows, cols, nz_udiag;
+ umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+ // allocate data
+ m_l.resize(rows,(std::min)(rows,cols));
+ m_l.resizeNonZeros(lnz);
+
+ m_u.resize((std::min)(rows,cols),cols);
+ m_u.resizeNonZeros(unz);
+
+ m_p.resize(rows);
+ m_q.resize(cols);
+
+ // extract
+ umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),
+ m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),
+ m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename KLU<MatrixType>::Scalar KLU<MatrixType>::determinant() const
+{
+ eigen_assert(false && "KLU: extractData Not Yet Implemented");
+ return Scalar();
+}
+#endif
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool KLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
+{
+ Index rhsCols = b.cols();
+ EIGEN_STATIC_ASSERT((XDerived::Flags&RowMajorBit)==0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+ x = b;
+ int info = klu_solve(m_symbolic, m_numeric, b.rows(), rhsCols, x.const_cast_derived().data(), const_cast<klu_common*>(&m_common), Scalar());
+
+ m_info = info!=0 ? Success : NumericalIssue;
+ return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_KLUSUPPORT_H
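
A usage sketch for the new wrapper, assuming SuiteSparse's KLU library is installed and linked and the KLUSupport module header is on the include path (none of this is part of the file above):

    #include <Eigen/Sparse>
    #include <Eigen/KLUSupport>

    int main() {
      Eigen::SparseMatrix<double> A(3, 3);
      A.insert(0, 0) = 4; A.insert(1, 1) = 2; A.insert(2, 2) = 5; A.insert(0, 2) = 1;
      A.makeCompressed();                // KLU expects compressed column-major storage
      Eigen::VectorXd b(3); b << 1, 2, 3;

      Eigen::KLU<Eigen::SparseMatrix<double> > solver;
      solver.analyzePattern(A);          // symbolic step, reusable across value changes
      solver.factorize(A);               // numeric step
      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }
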
diff --git a/examples/ThirdPartyLibs/Eigen/src/LU/Determinant.h b/examples/ThirdPartyLibs/Eigen/src/LU/Determinant.h
index d6a3c1e5a..3a41e6fcb 100644
--- a/examples/ThirdPartyLibs/Eigen/src/LU/Determinant.h
+++ b/examples/ThirdPartyLibs/Eigen/src/LU/Determinant.h
@@ -15,6 +15,7 @@ namespace Eigen {
namespace internal {
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline const typename Derived::Scalar bruteforce_det3_helper
(const MatrixBase<Derived>& matrix, int a, int b, int c)
{
@@ -22,14 +23,6 @@ inline const typename Derived::Scalar bruteforce_det3_helper
* (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
}
-template<typename Derived>
-const typename Derived::Scalar bruteforce_det4_helper
-(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
-{
- return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
- * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
-}
-
template<typename Derived,
int DeterminantType = Derived::RowsAtCompileTime
> struct determinant_impl
@@ -44,7 +37,8 @@ template<typename Derived,
template<typename Derived> struct determinant_impl<Derived, 1>
{
- static inline typename traits<Derived>::Scalar run(const Derived& m)
+ static inline EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0);
}
@@ -52,7 +46,8 @@ template<typename Derived> struct determinant_impl<Derived, 1>
template<typename Derived> struct determinant_impl<Derived, 2>
{
- static inline typename traits<Derived>::Scalar run(const Derived& m)
+ static inline EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
}
@@ -60,7 +55,8 @@ template<typename Derived> struct determinant_impl<Derived, 2>
template<typename Derived> struct determinant_impl<Derived, 3>
{
- static inline typename traits<Derived>::Scalar run(const Derived& m)
+ static inline EIGEN_DEVICE_FUNC
+ typename traits<Derived>::Scalar run(const Derived& m)
{
return bruteforce_det3_helper(m,0,1,2)
- bruteforce_det3_helper(m,1,0,2)
@@ -70,15 +66,34 @@ template<typename Derived> struct determinant_impl<Derived, 3>
template<typename Derived> struct determinant_impl<Derived, 4>
{
- static typename traits<Derived>::Scalar run(const Derived& m)
+ typedef typename traits<Derived>::Scalar Scalar;
+ static EIGEN_DEVICE_FUNC
+ Scalar run(const Derived& m)
+ {
+ Scalar d2_01 = det2(m, 0, 1);
+ Scalar d2_02 = det2(m, 0, 2);
+ Scalar d2_03 = det2(m, 0, 3);
+ Scalar d2_12 = det2(m, 1, 2);
+ Scalar d2_13 = det2(m, 1, 3);
+ Scalar d2_23 = det2(m, 2, 3);
+ Scalar d3_0 = det3(m, 1,d2_23, 2,d2_13, 3,d2_12);
+ Scalar d3_1 = det3(m, 0,d2_23, 2,d2_03, 3,d2_02);
+ Scalar d3_2 = det3(m, 0,d2_13, 1,d2_03, 3,d2_01);
+ Scalar d3_3 = det3(m, 0,d2_12, 1,d2_02, 2,d2_01);
+ return internal::pmadd(-m(0,3),d3_0, m(1,3)*d3_1) +
+ internal::pmadd(-m(2,3),d3_2, m(3,3)*d3_3);
+ }
+protected:
+ static EIGEN_DEVICE_FUNC
+ Scalar det2(const Derived& m, Index i0, Index i1)
+ {
+ return m(i0,0) * m(i1,1) - m(i1,0) * m(i0,1);
+ }
+
+ static EIGEN_DEVICE_FUNC
+ Scalar det3(const Derived& m, Index i0, const Scalar& d0, Index i1, const Scalar& d1, Index i2, const Scalar& d2)
{
- // trick by Martin Costabel to compute 4x4 det with only 30 muls
- return bruteforce_det4_helper(m,0,1,2,3)
- - bruteforce_det4_helper(m,0,2,1,3)
- + bruteforce_det4_helper(m,0,3,1,2)
- + bruteforce_det4_helper(m,1,2,0,3)
- - bruteforce_det4_helper(m,1,3,0,2)
- + bruteforce_det4_helper(m,2,3,0,1);
+ return internal::pmadd(m(i0,2), d0, internal::pmadd(-m(i1,2), d1, m(i2,2)*d2));
}
};
@@ -89,6 +104,7 @@ template<typename Derived> struct determinant_impl<Derived, 4>
* \returns the determinant of this matrix
*/
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
eigen_assert(rows() == cols());
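
The new 4x4 path replaces the 30-multiplication trick with a last-column Laplace expansion built from cached 2x2 minors, which maps naturally onto fused multiply-adds (pmadd). A plain-scalar transcription of the same scheme, as a standalone illustration rather than the Eigen internals:

    #include <array>

    using Mat4 = std::array<std::array<double, 4>, 4>;

    // 2x2 minor of rows i0,i1 and columns 0,1.
    double det2(const Mat4& m, int i0, int i1) {
      return m[i0][0] * m[i1][1] - m[i1][0] * m[i0][1];
    }
    // 3x3 minor expanded along its third column, given precomputed 2x2 minors.
    double det3(const Mat4& m, int i0, double d0, int i1, double d1, int i2, double d2) {
      return m[i0][2] * d0 - m[i1][2] * d1 + m[i2][2] * d2;
    }
    double det4(const Mat4& m) {
      double d2_01 = det2(m, 0, 1), d2_02 = det2(m, 0, 2), d2_03 = det2(m, 0, 3);
      double d2_12 = det2(m, 1, 2), d2_13 = det2(m, 1, 3), d2_23 = det2(m, 2, 3);
      double d3_0 = det3(m, 1, d2_23, 2, d2_13, 3, d2_12);
      double d3_1 = det3(m, 0, d2_23, 2, d2_03, 3, d2_02);
      double d3_2 = det3(m, 0, d2_13, 1, d2_03, 3, d2_01);
      double d3_3 = det3(m, 0, d2_12, 1, d2_02, 2, d2_01);
      return -m[0][3] * d3_0 + m[1][3] * d3_1 - m[2][3] * d3_2 + m[3][3] * d3_3;
    }

    int main() {
      Mat4 I = {{ {{1,0,0,0}}, {{0,1,0,0}}, {{0,0,1,0}}, {{0,0,0,1}} }};
      return det4(I) == 1.0 ? 0 : 1;  // determinant of the identity is 1
    }
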
diff --git a/examples/ThirdPartyLibs/Eigen/src/LU/FullPivLU.h b/examples/ThirdPartyLibs/Eigen/src/LU/FullPivLU.h
index ec61086d5..ba1749fa6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/LU/FullPivLU.h
+++ b/examples/ThirdPartyLibs/Eigen/src/LU/FullPivLU.h
@@ -18,6 +18,7 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -48,12 +49,12 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
* The data of the LU decomposition can be directly accessed through the methods matrixLU(),
* permutationP(), permutationQ().
*
- * As an exemple, here is how the original matrix can be retrieved:
+ * As an example, here is how the original matrix can be retrieved:
* \include class_FullPivLU.cpp
* Output: \verbinclude class_FullPivLU.out
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
- *
+ *
* \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
*/
template<typename _MatrixType> class FullPivLU
@@ -62,9 +63,9 @@ template<typename _MatrixType> class FullPivLU
public:
typedef _MatrixType MatrixType;
typedef SolverBase<FullPivLU> Base;
+ friend class SolverBase<FullPivLU>;
EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivLU)
- // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
@@ -218,6 +219,7 @@ template<typename _MatrixType> class FullPivLU
return internal::image_retval<FullPivLU>(*this, originalMatrix);
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \return a solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
@@ -237,14 +239,10 @@ template<typename _MatrixType> class FullPivLU
*
* \sa TriangularView::solve(), kernel(), inverse()
*/
- // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<FullPivLU, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "LU is not initialized.");
- return Solve<FullPivLU, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
@@ -320,7 +318,7 @@ template<typename _MatrixType> class FullPivLU
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
- : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
+ : NumTraits<Scalar>::epsilon() * RealScalar(m_lu.diagonalSize());
}
/** \returns the rank of the matrix of which *this is the LU decomposition.
@@ -406,8 +404,10 @@ template<typename _MatrixType> class FullPivLU
MatrixType reconstructedMatrix() const;
- EIGEN_DEVICE_FUNC inline Index rows() const { return m_lu.rows(); }
- EIGEN_DEVICE_FUNC inline Index cols() const { return m_lu.cols(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index rows() const EIGEN_NOEXCEPT { return m_lu.rows(); }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
+ inline Index cols() const EIGEN_NOEXCEPT { return m_lu.cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
@@ -529,8 +529,8 @@ void FullPivLU<MatrixType>::computeInPlace()
m_nonzero_pivots = k;
for(Index i = k; i < size; ++i)
{
- m_rowsTranspositions.coeffRef(i) = i;
- m_colsTranspositions.coeffRef(i) = i;
+ m_rowsTranspositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
+ m_colsTranspositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
}
break;
}
@@ -541,8 +541,8 @@ void FullPivLU<MatrixType>::computeInPlace()
// Now that we've found the pivot, we need to apply the row/col swaps to
// bring it to the location (k,k).
- m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
- m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
+ m_rowsTranspositions.coeffRef(k) = internal::convert_index<StorageIndex>(row_of_biggest_in_corner);
+ m_colsTranspositions.coeffRef(k) = internal::convert_index<StorageIndex>(col_of_biggest_in_corner);
if(k != row_of_biggest_in_corner) {
m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
++number_of_transpositions;
@@ -755,7 +755,6 @@ void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
const Index rows = this->rows(),
cols = this->cols(),
nonzero_pivots = this->rank();
- eigen_assert(rhs.rows() == rows);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
@@ -805,7 +804,6 @@ void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType
const Index rows = this->rows(), cols = this->cols(),
nonzero_pivots = this->rank();
- eigen_assert(rhs.rows() == cols);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
@@ -819,29 +817,19 @@ void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType
// Step 1
c = permutationQ().inverse() * rhs;
- if (Conjugate) {
- // Step 2
- m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
- .template triangularView<Upper>()
- .adjoint()
- .solveInPlace(c.topRows(nonzero_pivots));
- // Step 3
- m_lu.topLeftCorner(smalldim, smalldim)
- .template triangularView<UnitLower>()
- .adjoint()
- .solveInPlace(c.topRows(smalldim));
- } else {
- // Step 2
- m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
- .template triangularView<Upper>()
- .transpose()
- .solveInPlace(c.topRows(nonzero_pivots));
- // Step 3
- m_lu.topLeftCorner(smalldim, smalldim)
- .template triangularView<UnitLower>()
- .transpose()
- .solveInPlace(c.topRows(smalldim));
- }
+ // Step 2
+ m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
+ .template triangularView<Upper>()
+ .transpose()
+ .template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(nonzero_pivots));
+
+ // Step 3
+ m_lu.topLeftCorner(smalldim, smalldim)
+ .template triangularView<UnitLower>()
+ .transpose()
+ .template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(smalldim));
// Step 4
PermutationPType invp = permutationP().inverse().eval();
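
Deriving solve() from SolverBase (instead of the copy-pasted member removed above) also exposes the decomposition's transposed and adjoint solve paths, which now share one conjugateIf-based implementation. A hedged sketch:

    #include <Eigen/Core>
    #include <Eigen/LU>

    int main() {
      Eigen::Matrix3d A = Eigen::Matrix3d::Random();
      Eigen::Vector3d b = Eigen::Vector3d::Random();

      Eigen::FullPivLU<Eigen::Matrix3d> lu(A);
      Eigen::Vector3d x  = lu.solve(b);              // A   x = b
      Eigen::Vector3d xt = lu.transpose().solve(b);  // A^T x = b
      Eigen::Vector3d xh = lu.adjoint().solve(b);    // A^H x = b (same as A^T for reals)
      return 0;
    }
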
diff --git a/examples/ThirdPartyLibs/Eigen/src/LU/InverseImpl.h b/examples/ThirdPartyLibs/Eigen/src/LU/InverseImpl.h
index 018f99b58..a40cefa9e 100644
--- a/examples/ThirdPartyLibs/Eigen/src/LU/InverseImpl.h
+++ b/examples/ThirdPartyLibs/Eigen/src/LU/InverseImpl.h
@@ -77,10 +77,11 @@ inline void compute_inverse_size2_helper(
const MatrixType& matrix, const typename ResultType::Scalar& invdet,
ResultType& result)
{
+ typename ResultType::Scalar temp = matrix.coeff(0,0);
result.coeffRef(0,0) = matrix.coeff(1,1) * invdet;
result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
- result.coeffRef(1,1) = matrix.coeff(0,0) * invdet;
+ result.coeffRef(1,1) = temp * invdet;
}
template<typename MatrixType, typename ResultType>
@@ -143,13 +144,18 @@ inline void compute_inverse_size3_helper(
const Matrix<typename ResultType::Scalar,3,1>& cofactors_col0,
ResultType& result)
{
- result.row(0) = cofactors_col0 * invdet;
- result.coeffRef(1,0) = cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
- result.coeffRef(1,1) = cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
+ // Compute cofactors in a way that avoids aliasing issues.
+ typedef typename ResultType::Scalar Scalar;
+ const Scalar c01 = cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
+ const Scalar c11 = cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
+ const Scalar c02 = cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
result.coeffRef(1,2) = cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
- result.coeffRef(2,0) = cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
result.coeffRef(2,1) = cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
result.coeffRef(2,2) = cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
+ result.coeffRef(1,0) = c01;
+ result.coeffRef(1,1) = c11;
+ result.coeffRef(2,0) = c02;
+ result.row(0) = cofactors_col0 * invdet;
}
template<typename MatrixType, typename ResultType>
@@ -181,14 +187,13 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
bool& invertible
)
{
- using std::abs;
typedef typename ResultType::Scalar Scalar;
Matrix<Scalar,3,1> cofactors_col0;
cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
- invertible = abs(determinant) > absDeterminantThreshold;
+ invertible = Eigen::numext::abs(determinant) > absDeterminantThreshold;
if(!invertible) return;
const Scalar invdet = Scalar(1) / determinant;
compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse);
@@ -273,7 +278,13 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
using std::abs;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
- if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
+ if(invertible && extract_data(matrix) != extract_data(inverse)) {
+ compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
+ }
+ else if(invertible) {
+ MatrixType matrix_t = matrix;
+ compute_inverse<MatrixType, ResultType>::run(matrix_t, inverse);
+ }
}
};
@@ -290,6 +301,7 @@ template<typename DstXprType, typename XprType>
struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>
{
typedef Inverse<XprType> SrcXprType;
+ EIGEN_DEVICE_FUNC
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
{
Index dstRows = src.rows();
@@ -332,6 +344,7 @@ struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename Dst
* \sa computeInverseAndDetWithCheck()
*/
template<typename Derived>
+EIGEN_DEVICE_FUNC
inline const Inverse<Derived> MatrixBase<Derived>::inverse() const
{
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
@@ -345,6 +358,8 @@ inline const Inverse<Derived> MatrixBase<Derived>::inverse() const
*
* This is only for fixed-size square matrices of size up to 4x4.
*
+  * Notice that it will trigger a copy of the input matrix when computing the inverse in place.
+ *
* \param inverse Reference to the matrix in which to store the inverse.
* \param determinant Reference to the variable in which to store the determinant.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
@@ -385,6 +400,8 @@ inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
*
* This is only for fixed-size square matrices of size up to 4x4.
*
+  * Notice that it will trigger a copy of the input matrix when computing the inverse in place.
+ *
* \param inverse Reference to the matrix in which to store the inverse.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
* \param absDeterminantThreshold Optional parameter controlling the invertibility check.
@@ -404,7 +421,7 @@ inline void MatrixBase<Derived>::computeInverseWithCheck(
const RealScalar& absDeterminantThreshold
) const
{
- RealScalar determinant;
+ Scalar determinant;
// i'd love to put some static assertions there, but SFINAE means that they have no effect...
eigen_assert(rows() == cols());
computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);
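
Together with the size-2/3 helpers above, the extra copy in the size-4 branch makes the check-and-invert path safe when the result aliases the input, as the updated documentation notes. A small sketch of the in-place use this permits:

    #include <Eigen/Dense>

    int main() {
      Eigen::Matrix4d M = Eigen::Matrix4d::Random();
      double det;
      bool invertible;
      // M is both input and output; the aliasing is now handled internally.
      M.computeInverseAndDetWithCheck(M, det, invertible);
      return invertible ? 0 : 1;
    }
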
diff --git a/examples/ThirdPartyLibs/Eigen/src/LU/PartialPivLU.h b/examples/ThirdPartyLibs/Eigen/src/LU/PartialPivLU.h
index d43961887..34aed7249 100644
--- a/examples/ThirdPartyLibs/Eigen/src/LU/PartialPivLU.h
+++ b/examples/ThirdPartyLibs/Eigen/src/LU/PartialPivLU.h
@@ -19,6 +19,7 @@ template<typename _MatrixType> struct traits<PartialPivLU<_MatrixType> >
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
typedef traits<_MatrixType> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
@@ -69,7 +70,7 @@ struct enable_if_ref<Ref<T>,Derived> {
* The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
- *
+ *
* \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU
*/
template<typename _MatrixType> class PartialPivLU
@@ -79,8 +80,9 @@ template<typename _MatrixType> class PartialPivLU
typedef _MatrixType MatrixType;
typedef SolverBase<PartialPivLU> Base;
+ friend class SolverBase<PartialPivLU>;
+
EIGEN_GENERIC_PUBLIC_INTERFACE(PartialPivLU)
- // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
@@ -152,6 +154,7 @@ template<typename _MatrixType> class PartialPivLU
return m_p;
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method returns the solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
@@ -169,14 +172,10 @@ template<typename _MatrixType> class PartialPivLU
*
* \sa TriangularView::solve(), inverse(), computeInverse()
*/
- // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<PartialPivLU, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
- return Solve<PartialPivLU, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
@@ -217,8 +216,8 @@ template<typename _MatrixType> class PartialPivLU
MatrixType reconstructedMatrix() const;
- inline Index rows() const { return m_lu.rows(); }
- inline Index cols() const { return m_lu.cols(); }
+ EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_lu.rows(); }
+ EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_lu.cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
@@ -231,8 +230,6 @@ template<typename _MatrixType> class PartialPivLU
* Step 3: replace c by the solution x to Ux = c.
*/
- eigen_assert(rhs.rows() == m_lu.rows());
-
// Step 1
dst = permutationP() * rhs;
@@ -246,26 +243,21 @@ template<typename _MatrixType> class PartialPivLU
template<bool Conjugate, typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const {
- /* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
+ /* The decomposition PA = LU can be rewritten as A^T = U^T L^T P.
* So we proceed as follows:
- * Step 1: compute c = Pb.
- * Step 2: replace c by the solution x to Lx = c.
- * Step 3: replace c by the solution x to Ux = c.
+ * Step 1: compute c as the solution to L^T c = b
+ * Step 2: replace c by the solution x to U^T x = c.
+ * Step 3: update c = P^-1 c.
*/
eigen_assert(rhs.rows() == m_lu.cols());
- if (Conjugate) {
- // Step 1
- dst = m_lu.template triangularView<Upper>().adjoint().solve(rhs);
- // Step 2
- m_lu.template triangularView<UnitLower>().adjoint().solveInPlace(dst);
- } else {
- // Step 1
- dst = m_lu.template triangularView<Upper>().transpose().solve(rhs);
- // Step 2
- m_lu.template triangularView<UnitLower>().transpose().solveInPlace(dst);
- }
+ // Step 1
+ dst = m_lu.template triangularView<Upper>().transpose()
+ .template conjugateIf<Conjugate>().solve(rhs);
+ // Step 2
+ m_lu.template triangularView<UnitLower>().transpose()
+ .template conjugateIf<Conjugate>().solveInPlace(dst);
// Step 3
dst = permutationP().transpose() * dst;
}
@@ -339,17 +331,18 @@ PartialPivLU<MatrixType>::PartialPivLU(EigenBase<InputType>& matrix)
namespace internal {
/** \internal This is the blocked version of fullpivlu_unblocked() */
-template<typename Scalar, int StorageOrder, typename PivIndex>
+template<typename Scalar, int StorageOrder, typename PivIndex, int SizeAtCompileTime=Dynamic>
struct partial_lu_impl
{
- // FIXME add a stride to Map, so that the following mapping becomes easier,
- // another option would be to create an expression being able to automatically
- // warp any Map, Matrix, and Block expressions as a unique type, but since that's exactly
- // a Map + stride, why not adding a stride to Map, and convenient ctors from a Matrix,
- // and Block.
- typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;
- typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
- typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
+ static const int UnBlockedBound = 16;
+ static const bool UnBlockedAtCompileTime = SizeAtCompileTime!=Dynamic && SizeAtCompileTime<=UnBlockedBound;
+ static const int ActualSizeAtCompileTime = UnBlockedAtCompileTime ? SizeAtCompileTime : Dynamic;
+ // Remaining rows and columns at compile-time:
+ static const int RRows = SizeAtCompileTime==2 ? 1 : Dynamic;
+ static const int RCols = SizeAtCompileTime==2 ? 1 : Dynamic;
+ typedef Matrix<Scalar, ActualSizeAtCompileTime, ActualSizeAtCompileTime, StorageOrder> MatrixType;
+ typedef Ref<MatrixType> MatrixTypeRef;
+ typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > BlockType;
typedef typename MatrixType::RealScalar RealScalar;
/** \internal performs the LU decomposition in-place of the matrix \a lu
@@ -362,19 +355,22 @@ struct partial_lu_impl
*
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*/
- static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
+ static Index unblocked_lu(MatrixTypeRef& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
{
typedef scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = (std::min)(rows,cols);
+ // For small compile-time matrices it is worth processing the last row separately:
+ // speedup: +100% for 2x2, +10% for others.
+ const Index endk = UnBlockedAtCompileTime ? size-1 : size;
nb_transpositions = 0;
Index first_zero_pivot = -1;
- for(Index k = 0; k < size; ++k)
+ for(Index k = 0; k < endk; ++k)
{
- Index rrows = rows-k-1;
- Index rcols = cols-k-1;
+ int rrows = internal::convert_index<int>(rows-k-1);
+ int rcols = internal::convert_index<int>(cols-k-1);
Index row_of_biggest_in_col;
Score biggest_in_corner
@@ -391,9 +387,7 @@ struct partial_lu_impl
++nb_transpositions;
}
- // FIXME shall we introduce a safe quotient expression in cas 1/lu.coeff(k,k)
- // overflow but not the actual quotient?
- lu.col(k).tail(rrows) /= lu.coeff(k,k);
+ lu.col(k).tail(fix<RRows>(rrows)) /= lu.coeff(k,k);
}
else if(first_zero_pivot==-1)
{
@@ -403,8 +397,18 @@ struct partial_lu_impl
}
if(k<rows-1)
- lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);
+ lu.bottomRightCorner(fix<RRows>(rrows),fix<RCols>(rcols)).noalias() -= lu.col(k).tail(fix<RRows>(rrows)) * lu.row(k).tail(fix<RCols>(rcols));
+ }
+
+ // special handling of the last entry
+ if(UnBlockedAtCompileTime)
+ {
+ Index k = endk;
+ row_transpositions[k] = PivIndex(k);
+ if (Scoring()(lu(k, k)) == Score(0) && first_zero_pivot == -1)
+ first_zero_pivot = k;
}
+
return first_zero_pivot;
}
@@ -420,18 +424,17 @@ struct partial_lu_impl
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*
* \note This very low level interface using pointers, etc. is to:
- * 1 - reduce the number of instanciations to the strict minimum
- * 2 - avoid infinite recursion of the instanciations with Block<Block<Block<...> > >
+ * 1 - reduce the number of instantiations to the strict minimum
+ * 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/
static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
{
- MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
- MatrixType lu(lu1,0,0,rows,cols);
+ MatrixTypeRef lu = MatrixType::Map(lu_data,rows, cols, OuterStride<>(luStride));
const Index size = (std::min)(rows,cols);
// if the matrix is too small, no blocking:
- if(size<=16)
+ if(UnBlockedAtCompileTime || size<=UnBlockedBound)
{
return unblocked_lu(lu, row_transpositions, nb_transpositions);
}
@@ -457,12 +460,12 @@ struct partial_lu_impl
// A00 | A01 | A02
// lu = A_0 | A_1 | A_2 = A10 | A11 | A12
// A20 | A21 | A22
- BlockType A_0(lu,0,0,rows,k);
- BlockType A_2(lu,0,k+bs,rows,tsize);
- BlockType A11(lu,k,k,bs,bs);
- BlockType A12(lu,k,k+bs,bs,tsize);
- BlockType A21(lu,k+bs,k,trows,bs);
- BlockType A22(lu,k+bs,k+bs,trows,tsize);
+ BlockType A_0 = lu.block(0,0,rows,k);
+ BlockType A_2 = lu.block(0,k+bs,rows,tsize);
+ BlockType A11 = lu.block(k,k,bs,bs);
+ BlockType A12 = lu.block(k,k+bs,bs,tsize);
+ BlockType A21 = lu.block(k+bs,k,trows,bs);
+ BlockType A22 = lu.block(k+bs,k+bs,trows,tsize);
PivIndex nb_transpositions_in_panel;
// recursively call the blocked LU algorithm on [A11^T A21^T]^T
@@ -501,11 +504,18 @@ struct partial_lu_impl
template<typename MatrixType, typename TranspositionType>
void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions)
{
+ // Special-case of zero matrix.
+ if (lu.rows() == 0 || lu.cols() == 0) {
+ nb_transpositions = 0;
+ return;
+ }
eigen_assert(lu.cols() == row_transpositions.size());
- eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
+ eigen_assert(row_transpositions.size() < 2 || (&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
partial_lu_impl
- <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
+ < typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor,
+ typename TranspositionType::StorageIndex,
+ EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime)>
::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
}
@@ -519,7 +529,10 @@ void PartialPivLU<MatrixType>::compute()
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(m_lu.rows()<NumTraits<int>::highest());
- m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
+ if(m_lu.cols()>0)
+ m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
+ else
+ m_l1_norm = RealScalar(0);
eigen_assert(m_lu.rows() == m_lu.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
const Index size = m_lu.rows();
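
With SizeAtCompileTime threaded through partial_lu_impl, small fixed-size matrices stay on the unblocked path chosen at compile time, and zero-sized inputs are short-circuited before the usual assertions. A quick sketch of both cases (data is illustrative):

    #include <Eigen/Core>
    #include <Eigen/LU>

    int main() {
      // Fixed sizes <= 16 take the compile-time unblocked path; 2x2 additionally
      // benefits from the dedicated last-row handling.
      Eigen::Matrix2d A;
      A << 4, 1,
           2, 3;
      Eigen::PartialPivLU<Eigen::Matrix2d> lu(A);
      Eigen::Vector2d x = lu.solve(Eigen::Vector2d(1, 2));

      // Zero-sized matrices no longer trip the transposition-stride assertion.
      Eigen::MatrixXd empty(0, 0);
      Eigen::PartialPivLU<Eigen::MatrixXd> lu0(empty);
      return 0;
    }
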
diff --git a/examples/ThirdPartyLibs/Eigen/src/LU/arch/InverseSize4.h b/examples/ThirdPartyLibs/Eigen/src/LU/arch/InverseSize4.h
new file mode 100644
index 000000000..a232ffc0a
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/LU/arch/InverseSize4.h
@@ -0,0 +1,351 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2001 Intel Corporation
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+// The algorithm below is a reimplementation of former \src\LU\Inverse_SSE.h using PacketMath.
+// inv(M) = M#/|M|, where inv(M), M# and |M| denote the inverse of M,
+// adjugate of M and determinant of M respectively. M# is computed block-wise
+// using specific formulae. For proof, see:
+// https://lxjk.github.io/2017/09/03/Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained.html
+// Variable names are adopted from \src\LU\Inverse_SSE.h.
+//
+// The SSE code for the 4x4 float and double matrix inverse in former (deprecated) \src\LU\Inverse_SSE.h
+// comes from the following Intel's library:
+// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
+//
+// Here is the respective copyright and license statement:
+//
+// Copyright (c) 2001 Intel Corporation.
+//
+// Permition is granted to use, copy, distribute and prepare derivative works
+// of this library for any purpose and without fee, provided, that the above
+// copyright notice and this statement appear in all copies.
+// Intel makes no representations about the suitability of this software for
+// any purpose, and specifically disclaims all warranties.
+// See LEGAL.TXT for all the legal information.
+//
+// TODO: Unify implementations of different data types (i.e. float and double).
+#ifndef EIGEN_INVERSE_SIZE_4_H
+#define EIGEN_INVERSE_SIZE_4_H
+
+namespace Eigen
+{
+namespace internal
+{
+template <typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::Target, float, MatrixType, ResultType>
+{
+ enum
+ {
+ MatrixAlignment = traits<MatrixType>::Alignment,
+ ResultAlignment = traits<ResultType>::Alignment,
+ StorageOrdersMatch = (MatrixType::Flags & RowMajorBit) == (ResultType::Flags & RowMajorBit)
+ };
+ typedef typename conditional<(MatrixType::Flags & LinearAccessBit), MatrixType const &, typename MatrixType::PlainObject>::type ActualMatrixType;
+
+ static void run(const MatrixType &mat, ResultType &result)
+ {
+ ActualMatrixType matrix(mat);
+
+ const float* data = matrix.data();
+ const Index stride = matrix.innerStride();
+ Packet4f _L1 = ploadt<Packet4f,MatrixAlignment>(data);
+ Packet4f _L2 = ploadt<Packet4f,MatrixAlignment>(data + stride*4);
+ Packet4f _L3 = ploadt<Packet4f,MatrixAlignment>(data + stride*8);
+ Packet4f _L4 = ploadt<Packet4f,MatrixAlignment>(data + stride*12);
+
+ // Four 2x2 sub-matrices of the input matrix
+ // input = [[A, B],
+ // [C, D]]
+ Packet4f A, B, C, D;
+
+ if (!StorageOrdersMatch)
+ {
+ A = vec4f_unpacklo(_L1, _L2);
+ B = vec4f_unpacklo(_L3, _L4);
+ C = vec4f_unpackhi(_L1, _L2);
+ D = vec4f_unpackhi(_L3, _L4);
+ }
+ else
+ {
+ A = vec4f_movelh(_L1, _L2);
+ B = vec4f_movehl(_L2, _L1);
+ C = vec4f_movelh(_L3, _L4);
+ D = vec4f_movehl(_L4, _L3);
+ }
+
+ Packet4f AB, DC;
+
+ // AB = A# * B, where A# denotes the adjugate of A, and * denotes matrix product.
+ AB = pmul(vec4f_swizzle2(A, A, 3, 3, 0, 0), B);
+ AB = psub(AB, pmul(vec4f_swizzle2(A, A, 1, 1, 2, 2), vec4f_swizzle2(B, B, 2, 3, 0, 1)));
+
+ // DC = D#*C
+ DC = pmul(vec4f_swizzle2(D, D, 3, 3, 0, 0), C);
+ DC = psub(DC, pmul(vec4f_swizzle2(D, D, 1, 1, 2, 2), vec4f_swizzle2(C, C, 2, 3, 0, 1)));
+
+ // determinants of the sub-matrices
+ Packet4f dA, dB, dC, dD;
+
+ dA = pmul(vec4f_swizzle2(A, A, 3, 3, 1, 1), A);
+ dA = psub(dA, vec4f_movehl(dA, dA));
+
+ dB = pmul(vec4f_swizzle2(B, B, 3, 3, 1, 1), B);
+ dB = psub(dB, vec4f_movehl(dB, dB));
+
+ dC = pmul(vec4f_swizzle2(C, C, 3, 3, 1, 1), C);
+ dC = psub(dC, vec4f_movehl(dC, dC));
+
+ dD = pmul(vec4f_swizzle2(D, D, 3, 3, 1, 1), D);
+ dD = psub(dD, vec4f_movehl(dD, dD));
+
+ Packet4f d, d1, d2;
+
+ d = pmul(vec4f_swizzle2(DC, DC, 0, 2, 1, 3), AB);
+ d = padd(d, vec4f_movehl(d, d));
+ d = padd(d, vec4f_swizzle2(d, d, 1, 0, 0, 0));
+ d1 = pmul(dA, dD);
+ d2 = pmul(dB, dC);
+
+ // determinant of the input matrix, det = |A||D| + |B||C| - trace(A#*B*D#*C)
+ Packet4f det = vec4f_duplane(psub(padd(d1, d2), d), 0);
+
+ // reciprocal of the determinant of the input matrix, rd = 1/det
+ Packet4f rd = pdiv(pset1<Packet4f>(1.0f), det);
+
+ // Four sub-matrices of the inverse
+ Packet4f iA, iB, iC, iD;
+
+ // iD = D*|A| - C*A#*B
+ iD = pmul(vec4f_swizzle2(C, C, 0, 0, 2, 2), vec4f_movelh(AB, AB));
+ iD = padd(iD, pmul(vec4f_swizzle2(C, C, 1, 1, 3, 3), vec4f_movehl(AB, AB)));
+ iD = psub(pmul(D, vec4f_duplane(dA, 0)), iD);
+
+ // iA = A*|D| - B*D#*C
+ iA = pmul(vec4f_swizzle2(B, B, 0, 0, 2, 2), vec4f_movelh(DC, DC));
+ iA = padd(iA, pmul(vec4f_swizzle2(B, B, 1, 1, 3, 3), vec4f_movehl(DC, DC)));
+ iA = psub(pmul(A, vec4f_duplane(dD, 0)), iA);
+
+ // iB = C*|B| - D * (A#B)# = C*|B| - D*B#*A
+ iB = pmul(D, vec4f_swizzle2(AB, AB, 3, 0, 3, 0));
+ iB = psub(iB, pmul(vec4f_swizzle2(D, D, 1, 0, 3, 2), vec4f_swizzle2(AB, AB, 2, 1, 2, 1)));
+ iB = psub(pmul(C, vec4f_duplane(dB, 0)), iB);
+
+ // iC = B*|C| - A * (D#C)# = B*|C| - A*C#*D
+ iC = pmul(A, vec4f_swizzle2(DC, DC, 3, 0, 3, 0));
+ iC = psub(iC, pmul(vec4f_swizzle2(A, A, 1, 0, 3, 2), vec4f_swizzle2(DC, DC, 2, 1, 2, 1)));
+ iC = psub(pmul(B, vec4f_duplane(dC, 0)), iC);
+
+ const float sign_mask[4] = {0.0f, numext::bit_cast<float>(0x80000000u), numext::bit_cast<float>(0x80000000u), 0.0f};
+ const Packet4f p4f_sign_PNNP = ploadu<Packet4f>(sign_mask);
+ rd = pxor(rd, p4f_sign_PNNP);
+ iA = pmul(iA, rd);
+ iB = pmul(iB, rd);
+ iC = pmul(iC, rd);
+ iD = pmul(iD, rd);
+
+ Index res_stride = result.outerStride();
+ float *res = result.data();
+
+ pstoret<float, Packet4f, ResultAlignment>(res + 0, vec4f_swizzle2(iA, iB, 3, 1, 3, 1));
+ pstoret<float, Packet4f, ResultAlignment>(res + res_stride, vec4f_swizzle2(iA, iB, 2, 0, 2, 0));
+ pstoret<float, Packet4f, ResultAlignment>(res + 2 * res_stride, vec4f_swizzle2(iC, iD, 3, 1, 3, 1));
+ pstoret<float, Packet4f, ResultAlignment>(res + 3 * res_stride, vec4f_swizzle2(iC, iD, 2, 0, 2, 0));
+ }
+};
+
+#if !(defined EIGEN_VECTORIZE_NEON && !(EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG))
+// same algorithm as above, except that each operand is split into
+// halves for two registers to hold.
+template <typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::Target, double, MatrixType, ResultType>
+{
+ enum
+ {
+ MatrixAlignment = traits<MatrixType>::Alignment,
+ ResultAlignment = traits<ResultType>::Alignment,
+ StorageOrdersMatch = (MatrixType::Flags & RowMajorBit) == (ResultType::Flags & RowMajorBit)
+ };
+ typedef typename conditional<(MatrixType::Flags & LinearAccessBit),
+ MatrixType const &,
+ typename MatrixType::PlainObject>::type
+ ActualMatrixType;
+
+ static void run(const MatrixType &mat, ResultType &result)
+ {
+ ActualMatrixType matrix(mat);
+
+    // Four 2x2 sub-matrices of the input matrix, each further divided into an
+    // upper and a lower row: e.g. A1 is the upper row of A and A2 the lower row of A.
+ // input = [[A, B], = [[[A1, [B1,
+ // [C, D]] A2], B2]],
+ // [[C1, [D1,
+ // C2], D2]]]
+
+ Packet2d A1, A2, B1, B2, C1, C2, D1, D2;
+
+ const double* data = matrix.data();
+ const Index stride = matrix.innerStride();
+ if (StorageOrdersMatch)
+ {
+ A1 = ploadt<Packet2d,MatrixAlignment>(data + stride*0);
+ B1 = ploadt<Packet2d,MatrixAlignment>(data + stride*2);
+ A2 = ploadt<Packet2d,MatrixAlignment>(data + stride*4);
+ B2 = ploadt<Packet2d,MatrixAlignment>(data + stride*6);
+ C1 = ploadt<Packet2d,MatrixAlignment>(data + stride*8);
+ D1 = ploadt<Packet2d,MatrixAlignment>(data + stride*10);
+ C2 = ploadt<Packet2d,MatrixAlignment>(data + stride*12);
+ D2 = ploadt<Packet2d,MatrixAlignment>(data + stride*14);
+ }
+ else
+ {
+ Packet2d temp;
+ A1 = ploadt<Packet2d,MatrixAlignment>(data + stride*0);
+ C1 = ploadt<Packet2d,MatrixAlignment>(data + stride*2);
+ A2 = ploadt<Packet2d,MatrixAlignment>(data + stride*4);
+ C2 = ploadt<Packet2d,MatrixAlignment>(data + stride*6);
+ temp = A1;
+ A1 = vec2d_unpacklo(A1, A2);
+ A2 = vec2d_unpackhi(temp, A2);
+
+ temp = C1;
+ C1 = vec2d_unpacklo(C1, C2);
+ C2 = vec2d_unpackhi(temp, C2);
+
+ B1 = ploadt<Packet2d,MatrixAlignment>(data + stride*8);
+ D1 = ploadt<Packet2d,MatrixAlignment>(data + stride*10);
+ B2 = ploadt<Packet2d,MatrixAlignment>(data + stride*12);
+ D2 = ploadt<Packet2d,MatrixAlignment>(data + stride*14);
+
+ temp = B1;
+ B1 = vec2d_unpacklo(B1, B2);
+ B2 = vec2d_unpackhi(temp, B2);
+
+ temp = D1;
+ D1 = vec2d_unpacklo(D1, D2);
+ D2 = vec2d_unpackhi(temp, D2);
+ }
+
+ // determinants of the sub-matrices
+ Packet2d dA, dB, dC, dD;
+
+ dA = vec2d_swizzle2(A2, A2, 1);
+ dA = pmul(A1, dA);
+ dA = psub(dA, vec2d_duplane(dA, 1));
+
+ dB = vec2d_swizzle2(B2, B2, 1);
+ dB = pmul(B1, dB);
+ dB = psub(dB, vec2d_duplane(dB, 1));
+
+ dC = vec2d_swizzle2(C2, C2, 1);
+ dC = pmul(C1, dC);
+ dC = psub(dC, vec2d_duplane(dC, 1));
+
+ dD = vec2d_swizzle2(D2, D2, 1);
+ dD = pmul(D1, dD);
+ dD = psub(dD, vec2d_duplane(dD, 1));
+
+ Packet2d DC1, DC2, AB1, AB2;
+
+ // AB = A# * B, where A# denotes the adjugate of A, and * denotes matrix product.
+ AB1 = pmul(B1, vec2d_duplane(A2, 1));
+ AB2 = pmul(B2, vec2d_duplane(A1, 0));
+ AB1 = psub(AB1, pmul(B2, vec2d_duplane(A1, 1)));
+ AB2 = psub(AB2, pmul(B1, vec2d_duplane(A2, 0)));
+
+ // DC = D#*C
+ DC1 = pmul(C1, vec2d_duplane(D2, 1));
+ DC2 = pmul(C2, vec2d_duplane(D1, 0));
+ DC1 = psub(DC1, pmul(C2, vec2d_duplane(D1, 1)));
+ DC2 = psub(DC2, pmul(C1, vec2d_duplane(D2, 0)));
+
+ Packet2d d1, d2;
+
+ // determinant of the input matrix, det = |A||D| + |B||C| - trace(A#*B*D#*C)
+ Packet2d det;
+
+ // reciprocal of the determinant of the input matrix, rd = 1/det
+ Packet2d rd;
+
+ d1 = pmul(AB1, vec2d_swizzle2(DC1, DC2, 0));
+ d2 = pmul(AB2, vec2d_swizzle2(DC1, DC2, 3));
+ rd = padd(d1, d2);
+ rd = padd(rd, vec2d_duplane(rd, 1));
+
+ d1 = pmul(dA, dD);
+ d2 = pmul(dB, dC);
+
+ det = padd(d1, d2);
+ det = psub(det, rd);
+ det = vec2d_duplane(det, 0);
+ rd = pdiv(pset1<Packet2d>(1.0), det);
+
+ // rows of four sub-matrices of the inverse
+ Packet2d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2;
+
+ // iD = D*|A| - C*A#*B
+ iD1 = pmul(AB1, vec2d_duplane(C1, 0));
+ iD2 = pmul(AB1, vec2d_duplane(C2, 0));
+ iD1 = padd(iD1, pmul(AB2, vec2d_duplane(C1, 1)));
+ iD2 = padd(iD2, pmul(AB2, vec2d_duplane(C2, 1)));
+ dA = vec2d_duplane(dA, 0);
+ iD1 = psub(pmul(D1, dA), iD1);
+ iD2 = psub(pmul(D2, dA), iD2);
+
+ // iA = A*|D| - B*D#*C
+ iA1 = pmul(DC1, vec2d_duplane(B1, 0));
+ iA2 = pmul(DC1, vec2d_duplane(B2, 0));
+ iA1 = padd(iA1, pmul(DC2, vec2d_duplane(B1, 1)));
+ iA2 = padd(iA2, pmul(DC2, vec2d_duplane(B2, 1)));
+ dD = vec2d_duplane(dD, 0);
+ iA1 = psub(pmul(A1, dD), iA1);
+ iA2 = psub(pmul(A2, dD), iA2);
+
+ // iB = C*|B| - D * (A#B)# = C*|B| - D*B#*A
+ iB1 = pmul(D1, vec2d_swizzle2(AB2, AB1, 1));
+ iB2 = pmul(D2, vec2d_swizzle2(AB2, AB1, 1));
+ iB1 = psub(iB1, pmul(vec2d_swizzle2(D1, D1, 1), vec2d_swizzle2(AB2, AB1, 2)));
+ iB2 = psub(iB2, pmul(vec2d_swizzle2(D2, D2, 1), vec2d_swizzle2(AB2, AB1, 2)));
+ dB = vec2d_duplane(dB, 0);
+ iB1 = psub(pmul(C1, dB), iB1);
+ iB2 = psub(pmul(C2, dB), iB2);
+
+ // iC = B*|C| - A * (D#C)# = B*|C| - A*C#*D
+ iC1 = pmul(A1, vec2d_swizzle2(DC2, DC1, 1));
+ iC2 = pmul(A2, vec2d_swizzle2(DC2, DC1, 1));
+ iC1 = psub(iC1, pmul(vec2d_swizzle2(A1, A1, 1), vec2d_swizzle2(DC2, DC1, 2)));
+ iC2 = psub(iC2, pmul(vec2d_swizzle2(A2, A2, 1), vec2d_swizzle2(DC2, DC1, 2)));
+ dC = vec2d_duplane(dC, 0);
+ iC1 = psub(pmul(B1, dC), iC1);
+ iC2 = psub(pmul(B2, dC), iC2);
+
+ const double sign_mask1[2] = {0.0, numext::bit_cast<double>(0x8000000000000000ull)};
+ const double sign_mask2[2] = {numext::bit_cast<double>(0x8000000000000000ull), 0.0};
+ const Packet2d sign_PN = ploadu<Packet2d>(sign_mask1);
+ const Packet2d sign_NP = ploadu<Packet2d>(sign_mask2);
+ d1 = pxor(rd, sign_PN);
+ d2 = pxor(rd, sign_NP);
+
+ Index res_stride = result.outerStride();
+ double *res = result.data();
+ pstoret<double, Packet2d, ResultAlignment>(res + 0, pmul(vec2d_swizzle2(iA2, iA1, 3), d1));
+ pstoret<double, Packet2d, ResultAlignment>(res + res_stride, pmul(vec2d_swizzle2(iA2, iA1, 0), d2));
+ pstoret<double, Packet2d, ResultAlignment>(res + 2, pmul(vec2d_swizzle2(iB2, iB1, 3), d1));
+ pstoret<double, Packet2d, ResultAlignment>(res + res_stride + 2, pmul(vec2d_swizzle2(iB2, iB1, 0), d2));
+ pstoret<double, Packet2d, ResultAlignment>(res + 2 * res_stride, pmul(vec2d_swizzle2(iC2, iC1, 3), d1));
+ pstoret<double, Packet2d, ResultAlignment>(res + 3 * res_stride, pmul(vec2d_swizzle2(iC2, iC1, 0), d2));
+ pstoret<double, Packet2d, ResultAlignment>(res + 2 * res_stride + 2, pmul(vec2d_swizzle2(iD2, iD1, 3), d1));
+ pstoret<double, Packet2d, ResultAlignment>(res + 3 * res_stride + 2, pmul(vec2d_swizzle2(iD2, iD1, 0), d2));
+ }
+};
+#endif
+} // namespace internal
+} // namespace Eigen
+#endif
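In this double-precision path each 2x2 block spans two Packet2d registers, one per row, so a 2x2 determinant costs one swizzled multiply and one subtract. A hypothetical lane-level transcription of the |A| computation above (plain C++, not Eigen code):

    // dA = pmul(A1, vec2d_swizzle2(A2, A2, 1)); dA = psub(dA, vec2d_duplane(dA, 1))
    static double det2_lanes(double a00, double a01, double a10, double a11)
    {
      const double A1[2]  = {a00, a01};          // upper row of A, one register
      const double A2s[2] = {a11, a10};          // lower row with lanes swapped
      const double t[2]   = {A1[0] * A2s[0], A1[1] * A2s[1]};  // lanewise pmul
      return t[0] - t[1];                        // lane 0 after the psub, i.e. |A|
    }

The same two-instruction pattern is repeated verbatim for |B|, |C| and |D|.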
diff --git a/examples/ThirdPartyLibs/Eigen/src/LU/arch/Inverse_SSE.h b/examples/ThirdPartyLibs/Eigen/src/LU/arch/Inverse_SSE.h
deleted file mode 100644
index ebb64a62b..000000000
--- a/examples/ThirdPartyLibs/Eigen/src/LU/arch/Inverse_SSE.h
+++ /dev/null
@@ -1,338 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2001 Intel Corporation
-// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// The SSE code for the 4x4 float and double matrix inverse in this file
-// comes from the following Intel library:
-// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
-//
-// Here is the respective copyright and license statement:
-//
-// Copyright (c) 2001 Intel Corporation.
-//
-// Permission is granted to use, copy, distribute and prepare derivative works
-// of this library for any purpose and without fee, provided, that the above
-// copyright notice and this statement appear in all copies.
-// Intel makes no representations about the suitability of this software for
-// any purpose, and specifically disclaims all warranties.
-// See LEGAL.TXT for all the legal information.
-
-#ifndef EIGEN_INVERSE_SSE_H
-#define EIGEN_INVERSE_SSE_H
-
-namespace Eigen {
-
-namespace internal {
-
-template<typename MatrixType, typename ResultType>
-struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
-{
- enum {
- MatrixAlignment = traits<MatrixType>::Alignment,
- ResultAlignment = traits<ResultType>::Alignment,
- StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
- };
- typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
-
- static void run(const MatrixType& mat, ResultType& result)
- {
- ActualMatrixType matrix(mat);
- EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
-
- // Load the full matrix into registers
- __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
- __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
- __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
- __m128 _L4 = matrix.template packet<MatrixAlignment>(12);
-
-    // The inverse is calculated using the "Divide and Conquer" technique. The
-    // original matrix is divided into four 2x2 sub-matrices. Since each
-    // register holds four matrix elements, the smaller matrices are
-    // each represented as a register. Hence we get better locality in the
-    // calculations.
-
- __m128 A, B, C, D; // the four sub-matrices
- if(!StorageOrdersMatch)
- {
- A = _mm_unpacklo_ps(_L1, _L2);
- B = _mm_unpacklo_ps(_L3, _L4);
- C = _mm_unpackhi_ps(_L1, _L2);
- D = _mm_unpackhi_ps(_L3, _L4);
- }
- else
- {
- A = _mm_movelh_ps(_L1, _L2);
- B = _mm_movehl_ps(_L2, _L1);
- C = _mm_movelh_ps(_L3, _L4);
- D = _mm_movehl_ps(_L4, _L3);
- }
-
- __m128 iA, iB, iC, iD, // partial inverse of the sub-matrices
- DC, AB;
- __m128 dA, dB, dC, dD; // determinant of the sub-matrices
- __m128 det, d, d1, d2;
- __m128 rd; // reciprocal of the determinant
-
- // AB = A# * B
- AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
- AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
- // DC = D# * C
- DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
- DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));
-
- // dA = |A|
- dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
- dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
- // dB = |B|
- dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
- dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));
-
- // dC = |C|
- dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
- dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
- // dD = |D|
- dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
- dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));
-
- // d = trace(AB*DC) = trace(A#*B*D#*C)
- d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);
-
- // iD = C*A#*B
- iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
- iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
- // iA = B*D#*C
- iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
- iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));
-
- // d = trace(AB*DC) = trace(A#*B*D#*C) [continue]
- d = _mm_add_ps(d, _mm_movehl_ps(d, d));
- d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
- d1 = _mm_mul_ss(dA,dD);
- d2 = _mm_mul_ss(dB,dC);
-
- // iD = D*|A| - C*A#*B
- iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);
-
- // iA = A*|D| - B*D#*C;
- iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);
-
- // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
- det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
- rd = _mm_div_ss(_mm_set_ss(1.0f), det);
-
-// #ifdef ZERO_SINGULAR
-// rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
-// #endif
-
- // iB = D * (A#B)# = D*B#*A
- iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
- iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
- // iC = A * (D#C)# = A*C#*D
- iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
- iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));
-
- rd = _mm_shuffle_ps(rd,rd,0);
- rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));
-
- // iB = C*|B| - D*B#*A
- iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);
-
- // iC = B*|C| - A*C#*D;
- iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);
-
- // iX = iX / det
- iA = _mm_mul_ps(rd,iA);
- iB = _mm_mul_ps(rd,iB);
- iC = _mm_mul_ps(rd,iC);
- iD = _mm_mul_ps(rd,iD);
-
- Index res_stride = result.outerStride();
- float* res = result.data();
- pstoret<float, Packet4f, ResultAlignment>(res+0, _mm_shuffle_ps(iA,iB,0x77));
- pstoret<float, Packet4f, ResultAlignment>(res+res_stride, _mm_shuffle_ps(iA,iB,0x22));
- pstoret<float, Packet4f, ResultAlignment>(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77));
- pstoret<float, Packet4f, ResultAlignment>(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22));
- }
-
-};
-
-template<typename MatrixType, typename ResultType>
-struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
-{
- enum {
- MatrixAlignment = traits<MatrixType>::Alignment,
- ResultAlignment = traits<ResultType>::Alignment,
- StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
- };
- typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
-
- static void run(const MatrixType& mat, ResultType& result)
- {
- ActualMatrixType matrix(mat);
- const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
- const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
-
-    // The inverse is calculated using the "Divide and Conquer" technique. The
-    // original matrix is divided into four 2x2 sub-matrices. Since each
-    // register of the matrix holds two elements, the smaller matrices
-    // consist of two registers. Hence we get better locality in the
-    // calculations.
-
- // the four sub-matrices
- __m128d A1, A2, B1, B2, C1, C2, D1, D2;
-
- if(StorageOrdersMatch)
- {
- A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
- A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
- C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
- C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
- }
- else
- {
- __m128d tmp;
- A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
- A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
- tmp = A1;
- A1 = _mm_unpacklo_pd(A1,A2);
- A2 = _mm_unpackhi_pd(tmp,A2);
- tmp = C1;
- C1 = _mm_unpacklo_pd(C1,C2);
- C2 = _mm_unpackhi_pd(tmp,C2);
-
- B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
- B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
- tmp = B1;
- B1 = _mm_unpacklo_pd(B1,B2);
- B2 = _mm_unpackhi_pd(tmp,B2);
- tmp = D1;
- D1 = _mm_unpacklo_pd(D1,D2);
- D2 = _mm_unpackhi_pd(tmp,D2);
- }
-
-    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2,   // partial inverse of the sub-matrices
- DC1, DC2, AB1, AB2;
- __m128d dA, dB, dC, dD; // determinant of the sub-matrices
- __m128d det, d1, d2, rd;
-
- // dA = |A|
- dA = _mm_shuffle_pd(A2, A2, 1);
- dA = _mm_mul_pd(A1, dA);
- dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
- // dB = |B|
- dB = _mm_shuffle_pd(B2, B2, 1);
- dB = _mm_mul_pd(B1, dB);
- dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));
-
- // AB = A# * B
- AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
- AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
- AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
- AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));
-
- // dC = |C|
- dC = _mm_shuffle_pd(C2, C2, 1);
- dC = _mm_mul_pd(C1, dC);
- dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
- // dD = |D|
- dD = _mm_shuffle_pd(D2, D2, 1);
- dD = _mm_mul_pd(D1, dD);
- dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));
-
- // DC = D# * C
- DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
- DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
- DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
- DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));
-
- // rd = trace(AB*DC) = trace(A#*B*D#*C)
- d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
- d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
- rd = _mm_add_pd(d1, d2);
- rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));
-
- // iD = C*A#*B
- iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
- iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
- iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
- iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));
-
- // iA = B*D#*C
- iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
- iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
- iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
- iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));
-
- // iD = D*|A| - C*A#*B
- dA = _mm_shuffle_pd(dA,dA,0);
- iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
- iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);
-
- // iA = A*|D| - B*D#*C;
- dD = _mm_shuffle_pd(dD,dD,0);
- iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
- iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);
-
- d1 = _mm_mul_sd(dA, dD);
- d2 = _mm_mul_sd(dB, dC);
-
- // iB = D * (A#B)# = D*B#*A
- iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
- iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
- iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
- iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));
-
- // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
- det = _mm_add_sd(d1, d2);
- det = _mm_sub_sd(det, rd);
-
- // iC = A * (D#C)# = A*C#*D
- iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
- iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
- iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
- iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));
-
- rd = _mm_div_sd(_mm_set_sd(1.0), det);
-// #ifdef ZERO_SINGULAR
-// rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
-// #endif
- rd = _mm_shuffle_pd(rd,rd,0);
-
- // iB = C*|B| - D*B#*A
- dB = _mm_shuffle_pd(dB,dB,0);
- iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
- iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);
-
- d1 = _mm_xor_pd(rd, _Sign_PN);
- d2 = _mm_xor_pd(rd, _Sign_NP);
-
- // iC = B*|C| - A*C#*D;
- dC = _mm_shuffle_pd(dC,dC,0);
- iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
- iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
-
- Index res_stride = result.outerStride();
- double* res = result.data();
- pstoret<double, Packet2d, ResultAlignment>(res+0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));
- pstoret<double, Packet2d, ResultAlignment>(res+res_stride, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
- pstoret<double, Packet2d, ResultAlignment>(res+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));
- pstoret<double, Packet2d, ResultAlignment>(res+res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
- pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));
- pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
- pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));
- pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_INVERSE_SSE_H
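For readers comparing the deleted SSE kernel with its replacement: the hex shuffle immediates above encode exactly the lane selections the new vec4f_swizzle2 calls spell out. A small decoder, illustrative only:

    // _mm_shuffle_ps(a, b, imm) selects a[imm&3], a[(imm>>2)&3], b[(imm>>4)&3], b[(imm>>6)&3].
    #include <cstdio>

    static void decode_shuffle(unsigned imm)
    {
      std::printf("0x%02X -> a[%u], a[%u], b[%u], b[%u]\n",
                  imm, imm & 3u, (imm >> 2) & 3u, (imm >> 4) & 3u, (imm >> 6) & 3u);
    }

    int main()
    {
      decode_shuffle(0x0F); // a[3], a[3], b[0], b[0] == vec4f_swizzle2(A, A, 3, 3, 0, 0)
      decode_shuffle(0xA5); // a[1], a[1], b[2], b[2] == vec4f_swizzle2(A, A, 1, 1, 2, 2)
      decode_shuffle(0x4E); // a[2], a[3], b[0], b[1] == vec4f_swizzle2(B, B, 2, 3, 0, 1)
      return 0;
    }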
diff --git a/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Amd.h b/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Amd.h
index f91ecb24e..7ca3f33b1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Amd.h
+++ b/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Amd.h
@@ -2,32 +2,22 @@
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
-
NOTE: this routine has been adapted from the CSparse library:
Copyright (c) 2006, Timothy A. Davis.
http://www.suitesparse.com
-CSparse is free software; you can redistribute it and/or
-modify it under the terms of the GNU Lesser General Public
-License as published by the Free Software Foundation; either
-version 2.1 of the License, or (at your option) any later version.
-
-CSparse is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Lesser General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public
-License along with this Module; if not, write to the Free Software
-Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
+The author of CSparse, Timothy A. Davis, has executed a license with Google LLC
+to permit distribution of this code and derivative works as part of Eigen under
+the Mozilla Public License v. 2.0, as stated at the top of this file.
*/
-#include "../Core/util/NonMPL2.h"
-
#ifndef EIGEN_SPARSE_AMD_H
#define EIGEN_SPARSE_AMD_H
diff --git a/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Eigen_Colamd.h b/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Eigen_Colamd.h
index da85b4d6e..8e339a704 100644
--- a/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Eigen_Colamd.h
+++ b/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -13,115 +13,119 @@
// Davis (davis@cise.ufl.edu), University of Florida. The algorithm was
// developed in collaboration with John Gilbert, Xerox PARC, and Esmond
// Ng, Oak Ridge National Laboratory.
-//
+//
// Date:
-//
+//
// September 8, 2003. Version 2.3.
-//
+//
// Acknowledgements:
-//
+//
// This work was supported by the National Science Foundation, under
// grants DMS-9504974 and DMS-9803599.
-//
+//
// Notice:
-//
+//
// Copyright (c) 1998-2003 by the University of Florida.
// All Rights Reserved.
-//
+//
// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-//
+//
// Permission is hereby granted to use, copy, modify, and/or distribute
// this program, provided that the Copyright, this License, and the
// Availability of the original version is retained on all copies and made
// accessible to the end-user of any code or package that includes COLAMD
-// or any modified version of COLAMD.
-//
+// or any modified version of COLAMD.
+//
// Availability:
-//
+//
// The colamd/symamd library is available at
-//
+//
// http://www.suitesparse.com
-
+
#ifndef EIGEN_COLAMD_H
#define EIGEN_COLAMD_H
namespace internal {
+
+namespace Colamd {
+
/* Ensure that debugging is turned off: */
#ifndef COLAMD_NDEBUG
#define COLAMD_NDEBUG
#endif /* COLAMD_NDEBUG */
+
+
/* ========================================================================== */
/* === Knob and statistics definitions ====================================== */
/* ========================================================================== */
/* size of the knobs [ ] array. Only knobs [0..1] are currently used. */
-#define COLAMD_KNOBS 20
+const int NKnobs = 20;
/* number of output statistics. Only stats [0..6] are currently used. */
-#define COLAMD_STATS 20
+const int NStats = 20;
-/* knobs [0] and stats [0]: dense row knob and output statistic. */
-#define COLAMD_DENSE_ROW 0
+/* Indices into knobs and stats array. */
+enum KnobsStatsIndex {
+ /* knobs [0] and stats [0]: dense row knob and output statistic. */
+ DenseRow = 0,
-/* knobs [1] and stats [1]: dense column knob and output statistic. */
-#define COLAMD_DENSE_COL 1
+ /* knobs [1] and stats [1]: dense column knob and output statistic. */
+ DenseCol = 1,
-/* stats [2]: memory defragmentation count output statistic */
-#define COLAMD_DEFRAG_COUNT 2
+ /* stats [2]: memory defragmentation count output statistic */
+ DefragCount = 2,
-/* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */
-#define COLAMD_STATUS 3
+ /* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */
+ Status = 3,
-/* stats [4..6]: error info, or info on jumbled columns */
-#define COLAMD_INFO1 4
-#define COLAMD_INFO2 5
-#define COLAMD_INFO3 6
+ /* stats [4..6]: error info, or info on jumbled columns */
+ Info1 = 4,
+ Info2 = 5,
+ Info3 = 6
+};
/* error codes returned in stats [3]: */
-#define COLAMD_OK (0)
-#define COLAMD_OK_BUT_JUMBLED (1)
-#define COLAMD_ERROR_A_not_present (-1)
-#define COLAMD_ERROR_p_not_present (-2)
-#define COLAMD_ERROR_nrow_negative (-3)
-#define COLAMD_ERROR_ncol_negative (-4)
-#define COLAMD_ERROR_nnz_negative (-5)
-#define COLAMD_ERROR_p0_nonzero (-6)
-#define COLAMD_ERROR_A_too_small (-7)
-#define COLAMD_ERROR_col_length_negative (-8)
-#define COLAMD_ERROR_row_index_out_of_bounds (-9)
-#define COLAMD_ERROR_out_of_memory (-10)
-#define COLAMD_ERROR_internal_error (-999)
-
+enum Status {
+ Ok = 0,
+ OkButJumbled = 1,
+ ErrorANotPresent = -1,
+ ErrorPNotPresent = -2,
+ ErrorNrowNegative = -3,
+ ErrorNcolNegative = -4,
+ ErrorNnzNegative = -5,
+ ErrorP0Nonzero = -6,
+ ErrorATooSmall = -7,
+ ErrorColLengthNegative = -8,
+ ErrorRowIndexOutOfBounds = -9,
+ ErrorOutOfMemory = -10,
+ ErrorInternalError = -999
+};
/* ========================================================================== */
/* === Definitions ========================================================== */
/* ========================================================================== */
-#define ONES_COMPLEMENT(r) (-(r)-1)
+template <typename IndexType>
+IndexType ones_complement(const IndexType r) {
+ return (-(r)-1);
+}
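ones_complement(r) computes ~r in two's-complement arithmetic, so it is its own inverse; colamd uses this to flag an index in place and later recover it. A minimal illustration (namespace qualification from this file assumed):

    int r = 3;
    int flagged   = ones_complement(r);        // -4, i.e. ~3
    int recovered = ones_complement(flagged);  // 3 again; round-trips exactly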
/* -------------------------------------------------------------------------- */
-
-#define COLAMD_EMPTY (-1)
+const int Empty = -1;
/* Row and column status */
-#define ALIVE (0)
-#define DEAD (-1)
+enum RowColumnStatus {
+ Alive = 0,
+ Dead = -1
+};
/* Column status */
-#define DEAD_PRINCIPAL (-1)
-#define DEAD_NON_PRINCIPAL (-2)
-
-/* Macros for row and column status update and checking. */
-#define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark)
-#define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE)
-#define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE)
-#define COL_IS_DEAD(c) (Col [c].start < ALIVE)
-#define COL_IS_ALIVE(c) (Col [c].start >= ALIVE)
-#define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL)
-#define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; }
-#define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; }
-#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }
+enum ColumnStatus {
+ DeadPrincipal = -1,
+ DeadNonPrincipal = -2
+};
/* ========================================================================== */
/* === Colamd reporting mechanism =========================================== */
@@ -129,9 +133,9 @@ namespace internal {
// == Row and Column structures ==
template <typename IndexType>
-struct colamd_col
+struct ColStructure
{
- IndexType start ; /* index for A of first row in this column, or DEAD */
+ IndexType start ; /* index for A of first row in this column, or Dead */
/* if column is dead */
IndexType length ; /* number of rows in this column */
union
@@ -159,11 +163,21 @@ struct colamd_col
IndexType degree_next ; /* next column, if col is in a degree list */
IndexType hash_next ; /* next column, if col is in a hash list */
} shared4 ;
-
+
+ inline bool is_dead() const { return start < Alive; }
+
+ inline bool is_alive() const { return start >= Alive; }
+
+ inline bool is_dead_principal() const { return start == DeadPrincipal; }
+
+ inline void kill_principal() { start = DeadPrincipal; }
+
+ inline void kill_non_principal() { start = DeadNonPrincipal; }
+
};
-
+
template <typename IndexType>
-struct Colamd_Row
+struct RowStructure
{
IndexType start ; /* index for A of first col in this row */
IndexType length ; /* number of principal columns in this row */
@@ -177,13 +191,19 @@ struct Colamd_Row
IndexType mark ; /* for computing set differences and marking dead rows*/
IndexType first_column ;/* first column in row (used in garbage collection) */
} shared2 ;
-
+
+ inline bool is_dead() const { return shared2.mark < Alive; }
+
+ inline bool is_alive() const { return shared2.mark >= Alive; }
+
+ inline void kill() { shared2.mark = Dead; }
+
};
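The remaining hunks in this file apply this macro-to-member translation mechanically; the full correspondence, gathered in one place:

    // ROW_IS_DEAD(r)            ->  Row[r].is_dead()
    // ROW_IS_ALIVE(r)           ->  Row[r].is_alive()
    // KILL_ROW(r)               ->  Row[r].kill()
    // COL_IS_DEAD(c)            ->  Col[c].is_dead()
    // COL_IS_ALIVE(c)           ->  Col[c].is_alive()
    // COL_IS_DEAD_PRINCIPAL(c)  ->  Col[c].is_dead_principal()
    // KILL_PRINCIPAL_COL(c)     ->  Col[c].kill_principal()
    // KILL_NON_PRINCIPAL_COL(c) ->  Col[c].kill_non_principal()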
-
+
/* ========================================================================== */
/* === Colamd recommended memory size ======================================= */
/* ========================================================================== */
-
+
/*
The recommended length Alen of the array A passed to colamd is given by
the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro. It returns -1 if any
@@ -192,41 +212,41 @@ struct Colamd_Row
required for the Col and Row arrays, respectively, which are internal to
colamd. An additional n_col space is the minimal amount of "elbow room",
and nnz/5 more space is recommended for run time efficiency.
-
+
This macro is not needed when using symamd.
-
+
Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid
gcc -pedantic warning messages.
*/
template <typename IndexType>
-inline IndexType colamd_c(IndexType n_col)
-{ return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; }
+inline IndexType colamd_c(IndexType n_col)
+{ return IndexType( ((n_col) + 1) * sizeof (ColStructure<IndexType>) / sizeof (IndexType) ) ; }
template <typename IndexType>
inline IndexType colamd_r(IndexType n_row)
-{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); }
+{ return IndexType(((n_row) + 1) * sizeof (RowStructure<IndexType>) / sizeof (IndexType)); }
// Prototypes of non-user callable routines
template <typename IndexType>
-static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] );
+static IndexType init_rows_cols (IndexType n_row, IndexType n_col, RowStructure<IndexType> Row [], ColStructure<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[NStats] );
template <typename IndexType>
-static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);
+static void init_scoring (IndexType n_row, IndexType n_col, RowStructure<IndexType> Row [], ColStructure<IndexType> Col [], IndexType A [], IndexType head [], double knobs[NKnobs], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);
template <typename IndexType>
-static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);
+static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, RowStructure<IndexType> Row [], ColStructure<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);
template <typename IndexType>
-static void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []);
+static void order_children (IndexType n_col, ColStructure<IndexType> Col [], IndexType p []);
template <typename IndexType>
-static void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;
+static void detect_super_cols (ColStructure<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;
template <typename IndexType>
-static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ;
+static IndexType garbage_collection (IndexType n_row, IndexType n_col, RowStructure<IndexType> Row [], ColStructure<IndexType> Col [], IndexType A [], IndexType *pfree) ;
template <typename IndexType>
-static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ;
+static inline IndexType clear_mark (IndexType n_row, RowStructure<IndexType> Row [] ) ;
/* === No debugging ========================================================= */
@@ -240,37 +260,37 @@ static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row
/**
- * \brief Returns the recommended value of Alen
- *
- * Returns recommended value of Alen for use by colamd.
- * Returns -1 if any input argument is negative.
- * The use of this routine or macro is optional.
- * Note that the macro uses its arguments more than once,
- * so be careful for side effects, if you pass expressions as arguments to COLAMD_RECOMMENDED.
- *
+ * \brief Returns the recommended value of Alen
+ *
+ * Returns recommended value of Alen for use by colamd.
+ * Returns -1 if any input argument is negative.
+ * The use of this routine or macro is optional.
+ * Note that the older macro form used its arguments more than once,
+ * so be careful about side effects if you pass expressions as arguments to COLAMD_RECOMMENDED.
+ *
* \param nnz nonzeros in A
* \param n_row number of rows in A
* \param n_col number of columns in A
* \return recommended value of Alen for use by colamd
*/
template <typename IndexType>
-inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)
+inline IndexType recommended ( IndexType nnz, IndexType n_row, IndexType n_col)
{
if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
return (-1);
else
- return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5));
+ return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5));
}
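A hedged usage sketch of the renamed helper (the Colamd namespace nesting is read off this diff; treat the exact qualification as an assumption):

    // Workspace sizing for a matrix with 12 nonzeros in a 6x6 pattern:
    int Alen = Eigen::internal::Colamd::recommended(12, 6, 6);
    // i.e. 2*nnz + colamd_c(n_col) + colamd_r(n_row) + n_col + nnz/5,
    // or -1 if any argument is negative.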
/**
* \brief Set default parameters. The use of this routine is optional.
- *
- * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)
+ *
+ * Colamd: rows with more than (knobs [DenseRow] * n_col)
* entries are removed prior to ordering. Columns with more than
- * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to
- * ordering, and placed last in the output column ordering.
+ * (knobs [DenseCol] * n_row) entries are removed prior to
+ * ordering, and placed last in the output column ordering.
*
- * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
+ * DenseRow and DenseCol are defined as 0 and 1,
* respectively, in colamd.h. Default values of these two knobs
* are both 0.5. Currently, only knobs [0] and knobs [1] are
* used, but future versions may use more knobs. If so, they will
@@ -279,37 +299,37 @@ inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType
* not need to change, assuming that you either use
* colamd_set_defaults, or pass a (double *) NULL pointer as the
* knobs array to colamd or symamd.
- *
+ *
* \param knobs parameter settings for colamd
*/
-static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
+static inline void set_defaults(double knobs[NKnobs])
{
/* === Local variables ================================================== */
-
+
int i ;
if (!knobs)
{
return ; /* no knobs to initialize */
}
- for (i = 0 ; i < COLAMD_KNOBS ; i++)
+ for (i = 0 ; i < NKnobs ; i++)
{
knobs [i] = 0 ;
}
- knobs [COLAMD_DENSE_ROW] = 0.5 ; /* ignore rows over 50% dense */
- knobs [COLAMD_DENSE_COL] = 0.5 ; /* ignore columns over 50% dense */
+ knobs [Colamd::DenseRow] = 0.5 ; /* ignore rows over 50% dense */
+ knobs [Colamd::DenseCol] = 0.5 ; /* ignore columns over 50% dense */
}
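A short knob-tuning sketch using the renamed entry points (illustrative; the defaults are the 0.5 thresholds set just above):

    double knobs[Eigen::internal::Colamd::NKnobs];
    Eigen::internal::Colamd::set_defaults(knobs);    // DenseRow = DenseCol = 0.5
    knobs[Eigen::internal::Colamd::DenseRow] = 0.8;  // only drop rows >80% dense
    // pass knobs (or a null pointer for defaults) to compute_ordering()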
-/**
+/**
* \brief Computes a column ordering using the column approximate minimum degree ordering
- *
+ *
* Computes a column ordering (Q) of A such that P(AQ)=LU or
* (AQ)'AQ=LL' have less fill-in and require fewer floating point
* operations than factorizing the unpermuted matrix A or A'A,
* respectively.
- *
- *
+ *
+ *
* \param n_row number of rows in A
* \param n_col number of columns in A
* \param Alen, size of the array A
@@ -319,143 +339,143 @@ static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
* \param stats colamd output statistics and error codes
*/
template <typename IndexType>
-static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])
+static bool compute_ordering(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[NKnobs], IndexType stats[NStats])
{
/* === Local variables ================================================== */
-
+
IndexType i ; /* loop index */
IndexType nnz ; /* nonzeros in A */
IndexType Row_size ; /* size of Row [], in integers */
IndexType Col_size ; /* size of Col [], in integers */
IndexType need ; /* minimum required length of A */
- Colamd_Row<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */
- colamd_col<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */
+ Colamd::RowStructure<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */
+ Colamd::ColStructure<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */
IndexType n_col2 ; /* number of non-dense, non-empty columns */
IndexType n_row2 ; /* number of non-dense, non-empty rows */
IndexType ngarbage ; /* number of garbage collections performed */
IndexType max_deg ; /* maximum row degree */
- double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
-
-
+ double default_knobs [NKnobs] ; /* default knobs array */
+
+
/* === Check the input arguments ======================================== */
-
+
if (!stats)
{
COLAMD_DEBUG0 (("colamd: stats not present\n")) ;
return (false) ;
}
- for (i = 0 ; i < COLAMD_STATS ; i++)
+ for (i = 0 ; i < NStats ; i++)
{
stats [i] = 0 ;
}
- stats [COLAMD_STATUS] = COLAMD_OK ;
- stats [COLAMD_INFO1] = -1 ;
- stats [COLAMD_INFO2] = -1 ;
-
+ stats [Colamd::Status] = Colamd::Ok ;
+ stats [Colamd::Info1] = -1 ;
+ stats [Colamd::Info2] = -1 ;
+
if (!A) /* A is not present */
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
+ stats [Colamd::Status] = Colamd::ErrorANotPresent ;
COLAMD_DEBUG0 (("colamd: A not present\n")) ;
return (false) ;
}
-
+
if (!p) /* p is not present */
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
+ stats [Colamd::Status] = Colamd::ErrorPNotPresent ;
COLAMD_DEBUG0 (("colamd: p not present\n")) ;
return (false) ;
}
-
+
if (n_row < 0) /* n_row must be >= 0 */
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;
- stats [COLAMD_INFO1] = n_row ;
+ stats [Colamd::Status] = Colamd::ErrorNrowNegative ;
+ stats [Colamd::Info1] = n_row ;
COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ;
return (false) ;
}
-
+
if (n_col < 0) /* n_col must be >= 0 */
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
- stats [COLAMD_INFO1] = n_col ;
+ stats [Colamd::Status] = Colamd::ErrorNcolNegative ;
+ stats [Colamd::Info1] = n_col ;
COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ;
return (false) ;
}
-
+
nnz = p [n_col] ;
if (nnz < 0) /* nnz must be >= 0 */
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
- stats [COLAMD_INFO1] = nnz ;
+ stats [Colamd::Status] = Colamd::ErrorNnzNegative ;
+ stats [Colamd::Info1] = nnz ;
COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;
return (false) ;
}
-
+
if (p [0] != 0)
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
- stats [COLAMD_INFO1] = p [0] ;
+ stats [Colamd::Status] = Colamd::ErrorP0Nonzero ;
+ stats [Colamd::Info1] = p [0] ;
COLAMD_DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ;
return (false) ;
}
-
+
/* === If no knobs, set default knobs =================================== */
-
+
if (!knobs)
{
- colamd_set_defaults (default_knobs) ;
+ set_defaults (default_knobs) ;
knobs = default_knobs ;
}
-
+
/* === Allocate the Row and Col arrays from array A ===================== */
-
+
Col_size = colamd_c (n_col) ;
Row_size = colamd_r (n_row) ;
need = 2*nnz + n_col + Col_size + Row_size ;
-
+
if (need > Alen)
{
/* not enough space in array A to perform the ordering */
- stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;
- stats [COLAMD_INFO1] = need ;
- stats [COLAMD_INFO2] = Alen ;
+ stats [Colamd::Status] = Colamd::ErrorATooSmall ;
+ stats [Colamd::Info1] = need ;
+ stats [Colamd::Info2] = Alen ;
COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen));
return (false) ;
}
-
+
Alen -= Col_size + Row_size ;
- Col = (colamd_col<IndexType> *) &A [Alen] ;
- Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ;
+ Col = (ColStructure<IndexType> *) &A [Alen] ;
+ Row = (RowStructure<IndexType> *) &A [Alen + Col_size] ;
/* === Construct the row and column data structures ===================== */
-
- if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
+
+ if (!Colamd::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
{
/* input matrix is invalid */
COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ;
return (false) ;
}
-
+
/* === Initialize scores, kill dense rows/columns ======================= */
- Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs,
+ Colamd::init_scoring (n_row, n_col, Row, Col, A, p, knobs,
&n_row2, &n_col2, &max_deg) ;
-
+
/* === Order the supercolumns =========================================== */
-
- ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p,
+
+ ngarbage = Colamd::find_ordering (n_row, n_col, Alen, Row, Col, A, p,
n_col2, max_deg, 2*nnz) ;
-
+
/* === Order the non-principal columns ================================== */
-
- Eigen::internal::order_children (n_col, Col, p) ;
-
+
+ Colamd::order_children (n_col, Col, p) ;
+
/* === Return statistics in stats ======================================= */
-
- stats [COLAMD_DENSE_ROW] = n_row - n_row2 ;
- stats [COLAMD_DENSE_COL] = n_col - n_col2 ;
- stats [COLAMD_DEFRAG_COUNT] = ngarbage ;
- COLAMD_DEBUG0 (("colamd: done.\n")) ;
+
+ stats [Colamd::DenseRow] = n_row - n_row2 ;
+ stats [Colamd::DenseCol] = n_col - n_col2 ;
+ stats [Colamd::DefragCount] = ngarbage ;
+ COLAMD_DEBUG0 (("colamd: done.\n")) ;
return (true) ;
}
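An end-to-end sketch of the renamed driver, ordering a 3x3 identity pattern; the calling convention is read off the parameter comments above, so treat this as a hypothetical test rather than documented API:

    #include <vector>

    bool order_identity3()
    {
      using namespace Eigen::internal;
      int n_row = 3, n_col = 3;
      int p[4] = {0, 1, 2, 3};                 // column pointers; nnz = p[n_col] = 3
      int Alen = Colamd::recommended(3, n_row, n_col);
      std::vector<int> A(Alen);
      A[0] = 0; A[1] = 1; A[2] = 2;            // row indices, column by column
      int stats[Colamd::NStats];
      bool ok = Colamd::compute_ordering(n_row, n_col, Alen, A.data(), p,
                                         (double *)0, stats);
      // on success, p[0..n_col-1] holds the column permutation and
      // stats[Colamd::Status] == Colamd::Ok
      return ok;
    }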
@@ -465,7 +485,6 @@ static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *
/* There are no user-callable routines beyond this point in the file */
-
/* ========================================================================== */
/* === init_rows_cols ======================================================= */
/* ========================================================================== */
@@ -485,11 +504,11 @@ static IndexType init_rows_cols /* returns true if OK, or false otherwise */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
- Colamd_Row<IndexType> Row [], /* of size n_row+1 */
- colamd_col<IndexType> Col [], /* of size n_col+1 */
+ RowStructure<IndexType> Row [], /* of size n_row+1 */
+ ColStructure<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* row indices of A, of size Alen */
IndexType p [], /* pointers to columns in A, of size n_col+1 */
- IndexType stats [COLAMD_STATS] /* colamd statistics */
+ IndexType stats [NStats] /* colamd statistics */
)
{
/* === Local variables ================================================== */
@@ -512,24 +531,24 @@ static IndexType init_rows_cols /* returns true if OK, or false otherwise */
if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200
{
/* column pointers must be non-decreasing */
- stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
- stats [COLAMD_INFO1] = col ;
- stats [COLAMD_INFO2] = Col [col].length ;
+ stats [Colamd::Status] = Colamd::ErrorColLengthNegative ;
+ stats [Colamd::Info1] = col ;
+ stats [Colamd::Info2] = Col [col].length ;
COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ;
return (false) ;
}
Col [col].shared1.thickness = 1 ;
Col [col].shared2.score = 0 ;
- Col [col].shared3.prev = COLAMD_EMPTY ;
- Col [col].shared4.degree_next = COLAMD_EMPTY ;
+ Col [col].shared3.prev = Empty ;
+ Col [col].shared4.degree_next = Empty ;
}
/* p [0..n_col] no longer needed, used as "head" in subsequent routines */
/* === Scan columns, compute row degrees, and check row indices ========= */
- stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/
+ stats [Info3] = 0 ; /* number of duplicate or unsorted row indices*/
for (row = 0 ; row < n_row ; row++)
{
@@ -551,10 +570,10 @@ static IndexType init_rows_cols /* returns true if OK, or false otherwise */
/* make sure row indices within range */
if (row < 0 || row >= n_row)
{
- stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
- stats [COLAMD_INFO1] = col ;
- stats [COLAMD_INFO2] = row ;
- stats [COLAMD_INFO3] = n_row ;
+ stats [Colamd::Status] = Colamd::ErrorRowIndexOutOfBounds ;
+ stats [Colamd::Info1] = col ;
+ stats [Colamd::Info2] = row ;
+ stats [Colamd::Info3] = n_row ;
COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ;
return (false) ;
}
@@ -563,10 +582,10 @@ static IndexType init_rows_cols /* returns true if OK, or false otherwise */
{
/* row indices are unsorted or repeated (or both), thus col */
/* is jumbled. This is a notice, not an error condition. */
- stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
- stats [COLAMD_INFO1] = col ;
- stats [COLAMD_INFO2] = row ;
- (stats [COLAMD_INFO3]) ++ ;
+ stats [Colamd::Status] = Colamd::OkButJumbled ;
+ stats [Colamd::Info1] = col ;
+ stats [Colamd::Info2] = row ;
+ (stats [Colamd::Info3]) ++ ;
COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col));
}
@@ -604,7 +623,7 @@ static IndexType init_rows_cols /* returns true if OK, or false otherwise */
/* === Create row form ================================================== */
- if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+ if (stats [Status] == OkButJumbled)
{
/* if cols jumbled, watch for repeated row indices */
for (col = 0 ; col < n_col ; col++)
@@ -646,7 +665,7 @@ static IndexType init_rows_cols /* returns true if OK, or false otherwise */
/* === See if we need to re-create columns ============================== */
- if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+ if (stats [Status] == OkButJumbled)
{
COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ;
@@ -701,11 +720,11 @@ static void init_scoring
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
- Colamd_Row<IndexType> Row [], /* of size n_row+1 */
- colamd_col<IndexType> Col [], /* of size n_col+1 */
+ RowStructure<IndexType> Row [], /* of size n_row+1 */
+ ColStructure<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* column form and row form of A */
IndexType head [], /* of size n_col+1 */
- double knobs [COLAMD_KNOBS],/* parameters */
+ double knobs [NKnobs],/* parameters */
IndexType *p_n_row2, /* number of non-dense, non-empty rows */
IndexType *p_n_col2, /* number of non-dense, non-empty columns */
IndexType *p_max_deg /* maximum row degree */
@@ -732,8 +751,8 @@ static void init_scoring
/* === Extract knobs ==================================================== */
- dense_row_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ;
- dense_col_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ;
+ dense_row_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [Colamd::DenseRow] * n_col), n_col)) ;
+ dense_col_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [Colamd::DenseCol] * n_row), n_row)) ;
COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
max_deg = 0 ;
n_col2 = n_col ;
@@ -750,7 +769,7 @@ static void init_scoring
{
/* this is an empty column, kill and order it last */
Col [c].shared2.order = --n_col2 ;
- KILL_PRINCIPAL_COL (c) ;
+ Col[c].kill_principal() ;
}
}
COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ;
@@ -761,7 +780,7 @@ static void init_scoring
for (c = n_col-1 ; c >= 0 ; c--)
{
/* skip any dead columns */
- if (COL_IS_DEAD (c))
+ if (Col[c].is_dead())
{
continue ;
}
@@ -777,7 +796,7 @@ static void init_scoring
{
Row [*cp++].shared1.degree-- ;
}
- KILL_PRINCIPAL_COL (c) ;
+ Col[c].kill_principal() ;
}
}
COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ;
@@ -791,7 +810,7 @@ static void init_scoring
if (deg > dense_row_count || deg == 0)
{
/* kill a dense or empty row */
- KILL_ROW (r) ;
+ Row[r].kill() ;
--n_row2 ;
}
else
@@ -813,7 +832,7 @@ static void init_scoring
for (c = n_col-1 ; c >= 0 ; c--)
{
/* skip dead column */
- if (COL_IS_DEAD (c))
+ if (Col[c].is_dead())
{
continue ;
}
@@ -826,7 +845,7 @@ static void init_scoring
/* get a row */
row = *cp++ ;
/* skip if dead */
- if (ROW_IS_DEAD (row))
+ if (Row[row].is_dead())
{
continue ;
}
@@ -845,7 +864,7 @@ static void init_scoring
/* and have already been killed) */
COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ;
Col [c].shared2.order = --n_col2 ;
- KILL_PRINCIPAL_COL (c) ;
+ Col[c].kill_principal() ;
}
else
{
@@ -870,7 +889,7 @@ static void init_scoring
/* clear the hash buckets */
for (c = 0 ; c <= n_col ; c++)
{
- head [c] = COLAMD_EMPTY ;
+ head [c] = Empty ;
}
min_score = n_col ;
/* place in reverse order, so low column indices are at the front */
@@ -878,7 +897,7 @@ static void init_scoring
for (c = n_col-1 ; c >= 0 ; c--)
{
/* only add principal columns to degree lists */
- if (COL_IS_ALIVE (c))
+ if (Col[c].is_alive())
{
COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n",
c, Col [c].shared2.score, min_score, n_col)) ;
@@ -891,16 +910,16 @@ static void init_scoring
COLAMD_ASSERT (min_score <= n_col) ;
COLAMD_ASSERT (score >= 0) ;
COLAMD_ASSERT (score <= n_col) ;
- COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ;
+ COLAMD_ASSERT (head [score] >= Empty) ;
/* now add this column to dList at proper score location */
next_col = head [score] ;
- Col [c].shared3.prev = COLAMD_EMPTY ;
+ Col [c].shared3.prev = Empty ;
Col [c].shared4.degree_next = next_col ;
/* if there already was a column with the same score, set its */
/* previous pointer to this new column */
- if (next_col != COLAMD_EMPTY)
+ if (next_col != Empty)
{
Col [next_col].shared3.prev = c ;
}
@@ -939,8 +958,8 @@ static IndexType find_ordering /* return the number of garbage collections */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
IndexType Alen, /* size of A, 2*nnz + n_col or larger */
- Colamd_Row<IndexType> Row [], /* of size n_row+1 */
- colamd_col<IndexType> Col [], /* of size n_col+1 */
+ RowStructure<IndexType> Row [], /* of size n_row+1 */
+ ColStructure<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* column form and row form of A */
IndexType head [], /* of size n_col+1 */
IndexType n_col2, /* Remaining columns to order */
@@ -986,7 +1005,7 @@ static IndexType find_ordering /* return the number of garbage collections */
/* === Initialization and clear mark ==================================== */
max_mark = INT_MAX - n_col ; /* INT_MAX defined in <limits.h> */
- tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+ tag_mark = Colamd::clear_mark (n_row, Row) ;
min_score = 0 ;
ngarbage = 0 ;
COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ;
@@ -1001,10 +1020,10 @@ static IndexType find_ordering /* return the number of garbage collections */
/* make sure degree list isn't empty */
COLAMD_ASSERT (min_score >= 0) ;
COLAMD_ASSERT (min_score <= n_col) ;
- COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;
+ COLAMD_ASSERT (head [min_score] >= Empty) ;
/* get pivot column from head of minimum degree list */
- while (min_score < n_col && head [min_score] == COLAMD_EMPTY)
+ while (min_score < n_col && head [min_score] == Empty)
{
min_score++ ;
}
@@ -1012,12 +1031,12 @@ static IndexType find_ordering /* return the number of garbage collections */
COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;
next_col = Col [pivot_col].shared4.degree_next ;
head [min_score] = next_col ;
- if (next_col != COLAMD_EMPTY)
+ if (next_col != Empty)
{
- Col [next_col].shared3.prev = COLAMD_EMPTY ;
+ Col [next_col].shared3.prev = Empty ;
}
- COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ;
+ COLAMD_ASSERT (Col[pivot_col].is_alive()) ;
COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ;
/* remember score for defrag check */
@@ -1036,12 +1055,12 @@ static IndexType find_ordering /* return the number of garbage collections */
needed_memory = numext::mini(pivot_col_score, n_col - k) ;
if (pfree + needed_memory >= Alen)
{
- pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
+ pfree = Colamd::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
ngarbage++ ;
/* after garbage collection we will have enough */
COLAMD_ASSERT (pfree + needed_memory < Alen) ;
/* garbage collection has wiped out the Row[].shared2.mark array */
- tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+ tag_mark = Colamd::clear_mark (n_row, Row) ;
}
@@ -1064,9 +1083,9 @@ static IndexType find_ordering /* return the number of garbage collections */
{
/* get a row */
row = *cp++ ;
- COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ;
+ COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", Row[row].is_alive(), row)) ;
/* skip if row is dead */
- if (ROW_IS_DEAD (row))
+ if (Row[row].is_dead())
{
continue ;
}
@@ -1078,7 +1097,7 @@ static IndexType find_ordering /* return the number of garbage collections */
col = *rp++ ;
/* add the column, if alive and untagged */
col_thickness = Col [col].shared1.thickness ;
- if (col_thickness > 0 && COL_IS_ALIVE (col))
+ if (col_thickness > 0 && Col[col].is_alive())
{
/* tag column in pivot row */
Col [col].shared1.thickness = -col_thickness ;
@@ -1105,7 +1124,7 @@ static IndexType find_ordering /* return the number of garbage collections */
/* may be killing an already dead row */
row = *cp++ ;
COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ;
- KILL_ROW (row) ;
+ Row[row].kill() ;
}
/* === Select a row index to use as the new pivot row =============== */
@@ -1120,7 +1139,7 @@ static IndexType find_ordering /* return the number of garbage collections */
else
{
/* there is no pivot row, since it is of zero length */
- pivot_row = COLAMD_EMPTY ;
+ pivot_row = Empty ;
COLAMD_ASSERT (pivot_row_length == 0) ;
}
COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;
@@ -1157,7 +1176,7 @@ static IndexType find_ordering /* return the number of garbage collections */
while (rp < rp_end)
{
col = *rp++ ;
- COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+ COLAMD_ASSERT (Col[col].is_alive() && col != pivot_col) ;
COLAMD_DEBUG3 (("Col: %d\n", col)) ;
/* clear tags used to construct pivot row pattern */
@@ -1172,8 +1191,8 @@ static IndexType find_ordering /* return the number of garbage collections */
next_col = Col [col].shared4.degree_next ;
COLAMD_ASSERT (cur_score >= 0) ;
COLAMD_ASSERT (cur_score <= n_col) ;
- COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ;
- if (prev_col == COLAMD_EMPTY)
+ COLAMD_ASSERT (cur_score >= Empty) ;
+ if (prev_col == Empty)
{
head [cur_score] = next_col ;
}
@@ -1181,7 +1200,7 @@ static IndexType find_ordering /* return the number of garbage collections */
{
Col [prev_col].shared4.degree_next = next_col ;
}
- if (next_col != COLAMD_EMPTY)
+ if (next_col != Empty)
{
Col [next_col].shared3.prev = prev_col ;
}
@@ -1194,12 +1213,12 @@ static IndexType find_ordering /* return the number of garbage collections */
{
/* get a row */
row = *cp++ ;
- row_mark = Row [row].shared2.mark ;
/* skip if dead */
- if (ROW_IS_MARKED_DEAD (row_mark))
+ if (Row[row].is_dead())
{
continue ;
}
+ row_mark = Row [row].shared2.mark ;
COLAMD_ASSERT (row != pivot_row) ;
set_difference = row_mark - tag_mark ;
/* check if the row has been seen yet */
@@ -1215,7 +1234,7 @@ static IndexType find_ordering /* return the number of garbage collections */
if (set_difference == 0)
{
COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ;
- KILL_ROW (row) ;
+ Row[row].kill() ;
}
else
{
@@ -1237,7 +1256,7 @@ static IndexType find_ordering /* return the number of garbage collections */
{
/* get a column */
col = *rp++ ;
- COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+ COLAMD_ASSERT (Col[col].is_alive() && col != pivot_col) ;
hash = 0 ;
cur_score = 0 ;
cp = &A [Col [col].start] ;
@@ -1252,12 +1271,12 @@ static IndexType find_ordering /* return the number of garbage collections */
/* get a row */
row = *cp++ ;
COLAMD_ASSERT(row >= 0 && row < n_row) ;
- row_mark = Row [row].shared2.mark ;
/* skip if dead */
- if (ROW_IS_MARKED_DEAD (row_mark))
+ if (Row [row].is_dead())
{
continue ;
}
+ row_mark = Row [row].shared2.mark ;
COLAMD_ASSERT (row_mark > tag_mark) ;
/* compact the column */
*new_cp++ = row ;
@@ -1278,7 +1297,7 @@ static IndexType find_ordering /* return the number of garbage collections */
{
COLAMD_DEBUG4 (("further mass elimination. Col: %d\n", col)) ;
/* nothing left but the pivot row in this column */
- KILL_PRINCIPAL_COL (col) ;
+ Col[col].kill_principal() ;
pivot_row_degree -= Col [col].shared1.thickness ;
COLAMD_ASSERT (pivot_row_degree >= 0) ;
/* order it */
@@ -1302,7 +1321,7 @@ static IndexType find_ordering /* return the number of garbage collections */
COLAMD_ASSERT (hash <= n_col) ;
head_column = head [hash] ;
- if (head_column > COLAMD_EMPTY)
+ if (head_column > Empty)
{
/* degree list "hash" is non-empty, use prev (shared3) of */
/* first column in degree list as head of hash bucket */
@@ -1319,7 +1338,7 @@ static IndexType find_ordering /* return the number of garbage collections */
/* save hash function in Col [col].shared3.hash */
Col [col].shared3.hash = (IndexType) hash ;
- COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
+ COLAMD_ASSERT (Col[col].is_alive()) ;
}
}
@@ -1329,11 +1348,11 @@ static IndexType find_ordering /* return the number of garbage collections */
COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ;
- Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;
+ Colamd::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;
/* === Kill the pivotal column ====================================== */
- KILL_PRINCIPAL_COL (pivot_col) ;
+ Col[pivot_col].kill_principal() ;
/* === Clear mark =================================================== */
@@ -1341,7 +1360,7 @@ static IndexType find_ordering /* return the number of garbage collections */
if (tag_mark >= max_mark)
{
COLAMD_DEBUG2 (("clearing tag_mark\n")) ;
- tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+ tag_mark = Colamd::clear_mark (n_row, Row) ;
}
/* === Finalize the new pivot row, and column scores ================ */
@@ -1357,7 +1376,7 @@ static IndexType find_ordering /* return the number of garbage collections */
{
col = *rp++ ;
/* skip dead columns */
- if (COL_IS_DEAD (col))
+ if (Col[col].is_dead())
{
continue ;
}
@@ -1391,11 +1410,11 @@ static IndexType find_ordering /* return the number of garbage collections */
COLAMD_ASSERT (min_score <= n_col) ;
COLAMD_ASSERT (cur_score >= 0) ;
COLAMD_ASSERT (cur_score <= n_col) ;
- COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ;
+ COLAMD_ASSERT (head [cur_score] >= Empty) ;
next_col = head [cur_score] ;
Col [col].shared4.degree_next = next_col ;
- Col [col].shared3.prev = COLAMD_EMPTY ;
- if (next_col != COLAMD_EMPTY)
+ Col [col].shared3.prev = Empty ;
+ if (next_col != Empty)
{
Col [next_col].shared3.prev = col ;
}
@@ -1448,7 +1467,7 @@ static inline void order_children
/* === Parameters ======================================================= */
IndexType n_col, /* number of columns of A */
- colamd_col<IndexType> Col [], /* of size n_col+1 */
+ ColStructure<IndexType> Col [], /* of size n_col+1 */
IndexType p [] /* p [0 ... n_col-1] is the column permutation*/
)
{
@@ -1464,15 +1483,15 @@ static inline void order_children
for (i = 0 ; i < n_col ; i++)
{
/* find an un-ordered non-principal column */
- COLAMD_ASSERT (COL_IS_DEAD (i)) ;
- if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)
+ COLAMD_ASSERT (col_is_dead(Col, i)) ;
+ if (!Col[i].is_dead_principal() && Col [i].shared2.order == Empty)
{
parent = i ;
/* once found, find its principal parent */
do
{
parent = Col [parent].shared1.parent ;
- } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
+ } while (!Col[parent].is_dead_principal()) ;
/* now, order all un-ordered non-principal columns along path */
/* to this parent. collapse tree at the same time */
@@ -1482,7 +1501,7 @@ static inline void order_children
do
{
- COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;
+ COLAMD_ASSERT (Col [c].shared2.order == Empty) ;
/* order this column */
Col [c].shared2.order = order++ ;
@@ -1493,9 +1512,9 @@ static inline void order_children
c = Col [c].shared1.parent ;
/* continue until we hit an ordered column. There are */
- /* guarranteed not to be anymore unordered columns */
+ /* guaranteed not to be anymore unordered columns */
/* above an ordered column */
- } while (Col [c].shared2.order == COLAMD_EMPTY) ;
+ } while (Col [c].shared2.order == Empty) ;
/* re-order the super_col parent to largest order for this group */
Col [parent].shared2.order = order ;
@@ -1547,8 +1566,8 @@ template <typename IndexType>
static void detect_super_cols
(
/* === Parameters ======================================================= */
-
- colamd_col<IndexType> Col [], /* of size n_col+1 */
+
+ ColStructure<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* row indices of A */
IndexType head [], /* head of degree lists and hash buckets */
IndexType row_start, /* pointer to set of columns to check */
@@ -1578,7 +1597,7 @@ static void detect_super_cols
while (rp < rp_end)
{
col = *rp++ ;
- if (COL_IS_DEAD (col))
+ if (Col[col].is_dead())
{
continue ;
}
@@ -1590,7 +1609,7 @@ static void detect_super_cols
/* === Get the first column in this hash bucket ===================== */
head_column = head [hash] ;
- if (head_column > COLAMD_EMPTY)
+ if (head_column > Empty)
{
first_col = Col [head_column].shared3.headhash ;
}
@@ -1601,10 +1620,10 @@ static void detect_super_cols
/* === Consider each column in the hash bucket ====================== */
- for (super_c = first_col ; super_c != COLAMD_EMPTY ;
+ for (super_c = first_col ; super_c != Empty ;
super_c = Col [super_c].shared4.hash_next)
{
- COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;
+ COLAMD_ASSERT (Col [super_c].is_alive()) ;
COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;
length = Col [super_c].length ;
@@ -1614,10 +1633,10 @@ static void detect_super_cols
/* === Compare super_c with all columns after it ================ */
for (c = Col [super_c].shared4.hash_next ;
- c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)
+ c != Empty ; c = Col [c].shared4.hash_next)
{
COLAMD_ASSERT (c != super_c) ;
- COLAMD_ASSERT (COL_IS_ALIVE (c)) ;
+ COLAMD_ASSERT (Col[c].is_alive()) ;
COLAMD_ASSERT (Col [c].shared3.hash == hash) ;
/* not identical if lengths or scores are different */
@@ -1635,10 +1654,10 @@ static void detect_super_cols
for (i = 0 ; i < length ; i++)
{
/* the columns are "clean" (no dead rows) */
- COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
- COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
+ COLAMD_ASSERT ( cp1->is_alive() );
+ COLAMD_ASSERT ( cp2->is_alive() );
/* row indices will be in the same order for both supercols, */
- /* no gather scatter nessasary */
+ /* no gather scatter necessary */
if (*cp1++ != *cp2++)
{
break ;
@@ -1658,9 +1677,9 @@ static void detect_super_cols
Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
Col [c].shared1.parent = super_c ;
- KILL_NON_PRINCIPAL_COL (c) ;
+ Col[c].kill_non_principal() ;
/* order c later, in order_children() */
- Col [c].shared2.order = COLAMD_EMPTY ;
+ Col [c].shared2.order = Empty ;
/* remove c from hash bucket */
Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
}
@@ -1668,15 +1687,15 @@ static void detect_super_cols
/* === Empty this hash bucket ======================================= */
- if (head_column > COLAMD_EMPTY)
+ if (head_column > Empty)
{
/* corresponding degree list "hash" is not empty */
- Col [head_column].shared3.headhash = COLAMD_EMPTY ;
+ Col [head_column].shared3.headhash = Empty ;
}
else
{
/* corresponding degree list "hash" is empty */
- head [hash] = COLAMD_EMPTY ;
+ head [hash] = Empty ;
}
}
}
@@ -1688,7 +1707,7 @@ static void detect_super_cols
/*
Defragments and compacts columns and rows in the workspace A. Used when
- all avaliable memory has been used while performing row merging. Returns
+ all available memory has been used while performing row merging. Returns
the index of the first free position in A, after garbage collection. The
time taken by this routine is linear in the size of the array A, which is
itself linear in the number of nonzeros in the input matrix.
@@ -1698,11 +1717,11 @@ template <typename IndexType>
static IndexType garbage_collection /* returns the new value of pfree */
(
/* === Parameters ======================================================= */
-
+
IndexType n_row, /* number of rows */
IndexType n_col, /* number of columns */
- Colamd_Row<IndexType> Row [], /* row info */
- colamd_col<IndexType> Col [], /* column info */
+ RowStructure<IndexType> Row [], /* row info */
+ ColStructure<IndexType> Col [], /* column info */
IndexType A [], /* A [0 ... Alen-1] holds the matrix */
IndexType *pfree /* &A [0] ... pfree is in use */
)
@@ -1721,7 +1740,7 @@ static IndexType garbage_collection /* returns the new value of pfree */
pdest = &A[0] ;
for (c = 0 ; c < n_col ; c++)
{
- if (COL_IS_ALIVE (c))
+ if (Col[c].is_alive())
{
psrc = &A [Col [c].start] ;
@@ -1732,7 +1751,7 @@ static IndexType garbage_collection /* returns the new value of pfree */
for (j = 0 ; j < length ; j++)
{
r = *psrc++ ;
- if (ROW_IS_ALIVE (r))
+ if (Row[r].is_alive())
{
*pdest++ = r ;
}
@@ -1745,22 +1764,22 @@ static IndexType garbage_collection /* returns the new value of pfree */
for (r = 0 ; r < n_row ; r++)
{
- if (ROW_IS_ALIVE (r))
+ if (Row[r].is_alive())
{
if (Row [r].length == 0)
{
- /* this row is of zero length. cannot compact it, so kill it */
- COLAMD_DEBUG3 (("Defrag row kill\n")) ;
- KILL_ROW (r) ;
+ /* this row is of zero length. cannot compact it, so kill it */
+ COLAMD_DEBUG3 (("Defrag row kill\n")) ;
+ Row[r].kill() ;
}
else
{
- /* save first column index in Row [r].shared2.first_column */
- psrc = &A [Row [r].start] ;
- Row [r].shared2.first_column = *psrc ;
- COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
- /* flag the start of the row with the one's complement of row */
- *psrc = ONES_COMPLEMENT (r) ;
+ /* save first column index in Row [r].shared2.first_column */
+ psrc = &A [Row [r].start] ;
+ Row [r].shared2.first_column = *psrc ;
+ COLAMD_ASSERT (Row[r].is_alive()) ;
+ /* flag the start of the row with the one's complement of row */
+ *psrc = ones_complement(r) ;
}
}
@@ -1776,11 +1795,11 @@ static IndexType garbage_collection /* returns the new value of pfree */
{
psrc-- ;
/* get the row index */
- r = ONES_COMPLEMENT (*psrc) ;
+ r = ones_complement(*psrc) ;
COLAMD_ASSERT (r >= 0 && r < n_row) ;
/* restore first column index */
*psrc = Row [r].shared2.first_column ;
- COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+ COLAMD_ASSERT (Row[r].is_alive()) ;
/* move and compact the row */
COLAMD_ASSERT (pdest <= psrc) ;
@@ -1789,7 +1808,7 @@ static IndexType garbage_collection /* returns the new value of pfree */
for (j = 0 ; j < length ; j++)
{
c = *psrc++ ;
- if (COL_IS_ALIVE (c))
+ if (Col[c].is_alive())
{
*pdest++ = c ;
}
@@ -1821,7 +1840,7 @@ static inline IndexType clear_mark /* return the new value for tag_mark */
/* === Parameters ======================================================= */
IndexType n_row, /* number of rows in A */
- Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
+ RowStructure<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
)
{
/* === Local variables ================================================== */
@@ -1830,7 +1849,7 @@ static inline IndexType clear_mark /* return the new value for tag_mark */
for (r = 0 ; r < n_row ; r++)
{
- if (ROW_IS_ALIVE (r))
+ if (Row[r].is_alive())
{
Row [r].shared2.mark = 0 ;
}
@@ -1838,6 +1857,7 @@ static inline IndexType clear_mark /* return the new value for tag_mark */
return (1) ;
}
+} // namespace Colamd
-} // namespace internal
+} // namespace internal
#endif
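The colamd hunks above are a mechanical refactor: the C-style status macros (ROW_IS_DEAD, COL_IS_ALIVE, KILL_ROW, ...) become member functions on the renamed RowStructure/ColStructure types, and everything moves into a Colamd namespace. A minimal sketch of the member form, assuming the classic colamd convention that a negative start field marks a dead column; the name ColStructureSketch and the literal constants are illustrative only, since the shipped header keeps the original unions and named enum constants:

template <typename IndexType>
struct ColStructureSketch {
  IndexType start;  // < 0 marks a dead column

  bool is_alive() const          { return start >= 0; }   // was COL_IS_ALIVE(c)
  bool is_dead() const           { return start < 0; }    // was COL_IS_DEAD(c)
  bool is_dead_principal() const { return start == -1; }  // was COL_IS_DEAD_PRINCIPAL(c)
  void kill_principal()          { start = -1; }          // was KILL_PRINCIPAL_COL(c)
  void kill_non_principal()      { start = -2; }          // was KILL_NON_PRINCIPAL_COL(c)
};

Rows follow the same pattern via Row[r].is_alive()/kill() on the shared2.mark field; the behavior is unchanged, the macros just gain type checking and scoping.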
diff --git a/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Ordering.h b/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Ordering.h
index 7ea9b14d7..c57897014 100644
--- a/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Ordering.h
+++ b/examples/ThirdPartyLibs/Eigen/src/OrderingMethods/Ordering.h
@@ -31,15 +31,13 @@ void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat)
for (int i = 0; i < C.rows(); i++)
{
for (typename MatrixType::InnerIterator it(C, i); it; ++it)
- it.valueRef() = 0.0;
+ it.valueRef() = typename MatrixType::Scalar(0);
}
symmat = C + A;
}
}
-#ifndef EIGEN_MPL2_ONLY
-
/** \ingroup OrderingMethods_Module
* \class AMDOrdering
*
@@ -81,8 +79,6 @@ class AMDOrdering
}
};
-#endif // EIGEN_MPL2_ONLY
-
/** \ingroup OrderingMethods_Module
* \class NaturalOrdering
*
@@ -133,17 +129,17 @@ class COLAMDOrdering
StorageIndex n = StorageIndex(mat.cols());
StorageIndex nnz = StorageIndex(mat.nonZeros());
// Get the recommended value of Alen to be used by colamd
- StorageIndex Alen = internal::colamd_recommended(nnz, m, n);
+ StorageIndex Alen = internal::Colamd::recommended(nnz, m, n);
// Set the default parameters
- double knobs [COLAMD_KNOBS];
- StorageIndex stats [COLAMD_STATS];
- internal::colamd_set_defaults(knobs);
+ double knobs [internal::Colamd::NKnobs];
+ StorageIndex stats [internal::Colamd::NStats];
+ internal::Colamd::set_defaults(knobs);
IndexVector p(n+1), A(Alen);
for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
// Call Colamd routine to compute the ordering
- StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+ StorageIndex info = internal::Colamd::compute_ordering(m, n, Alen, A.data(), p.data(), knobs, stats);
EIGEN_UNUSED_VARIABLE(info);
eigen_assert( info && "COLAMD failed " );
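The public COLAMDOrdering interface is untouched by the move into internal::Colamd; only the internal entry points are renamed (colamd_recommended becomes recommended, colamd becomes compute_ordering). A quick usage check, using nothing beyond the documented Eigen API:

#include <Eigen/Sparse>
#include <iostream>

int main() {
  // Small sparse test matrix; COLAMD requires compressed column storage.
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 4.0; A.insert(1, 1) = 3.0;
  A.insert(2, 2) = 2.0; A.insert(0, 2) = 1.0;
  A.makeCompressed();

  Eigen::COLAMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A, perm);  // fill-reducing column permutation
  std::cout << perm.indices().transpose() << "\n";
  return 0;
}

This is the same ordering that SparseLU<SparseMatrix<double>, COLAMDOrdering<int>> applies by default.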
diff --git a/examples/ThirdPartyLibs/Eigen/src/PaStiXSupport/PaStiXSupport.h b/examples/ThirdPartyLibs/Eigen/src/PaStiXSupport/PaStiXSupport.h
index d2ebfd7bb..37426877a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/PaStiXSupport/PaStiXSupport.h
+++ b/examples/ThirdPartyLibs/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -64,28 +64,28 @@ namespace internal
typedef typename _MatrixType::StorageIndex StorageIndex;
};
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
}
- void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
+ inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
@@ -203,7 +203,7 @@ class PastixBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the PaStiX reports a problem
* \c InvalidInput if the input matrix is invalid
*
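Adding inline to the eigen_pastix overloads fixes a latent one-definition-rule problem: PaStiXSupport.h is a header, and a non-inline free function defined in a header yields duplicate symbols as soon as two translation units include it. A generic illustration of the failure mode (the names here are made up for the example):

// shared.h
#ifndef SHARED_H
#define SHARED_H
// Without 'inline', this definition is emitted by every .cpp that includes
// shared.h, and the linker reports "multiple definition of scale3".
// With 'inline', each translation unit's copy is merged into one.
inline void scale3(double* v, double s) {
  for (int i = 0; i < 3; ++i) v[i] *= s;
}
#endif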
diff --git a/examples/ThirdPartyLibs/Eigen/src/PardisoSupport/PardisoSupport.h b/examples/ThirdPartyLibs/Eigen/src/PardisoSupport/PardisoSupport.h
index 091c3970e..f89b79bd5 100644
--- a/examples/ThirdPartyLibs/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/examples/ThirdPartyLibs/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -123,6 +123,7 @@ class PardisoImpl : public SparseSolverBase<Derived>
};
PardisoImpl()
+ : m_analysisIsOk(false), m_factorizationIsOk(false)
{
eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type");
m_iparm.setZero();
@@ -140,7 +141,7 @@ class PardisoImpl : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
@@ -385,14 +386,15 @@ class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
{
protected:
typedef PardisoImpl<PardisoLU> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
friend class PardisoImpl< PardisoLU<MatrixType> >;
public:
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+
using Base::compute;
using Base::solve;
@@ -440,14 +442,14 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
{
protected:
typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
public:
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
typedef typename Base::StorageIndex StorageIndex;
enum { UpLo = _UpLo };
using Base::compute;
@@ -503,14 +505,14 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
{
protected:
typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
public:
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
typedef typename Base::StorageIndex StorageIndex;
using Base::compute;
enum { UpLo = Options&(Upper|Lower) };
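The Pardiso hunks move the Scalar/RealScalar typedefs from the protected to the public section, so generic client code can name them (e.g. PardisoLU<M>::Scalar), and default-initialize m_analysisIsOk/m_factorizationIsOk so a solver queried before compute() reports a sane state. The typedef relocation, shown on a stand-in class since PardisoLU itself requires MKL:

#include <type_traits>

struct BaseSolver { typedef double Scalar; };

class Solver : private BaseSolver {
public:
  // Re-exported publicly, as in the patch; while this typedef lived in the
  // protected section, Solver::Scalar was inaccessible to callers.
  typedef BaseSolver::Scalar Scalar;
};

static_assert(std::is_same<Solver::Scalar, double>::value, "publicly visible");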
diff --git a/examples/ThirdPartyLibs/Eigen/src/QR/ColPivHouseholderQR.h b/examples/ThirdPartyLibs/Eigen/src/QR/ColPivHouseholderQR.h
index 5270eaca2..9b677e9bf 100644
--- a/examples/ThirdPartyLibs/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/examples/ThirdPartyLibs/Eigen/src/QR/ColPivHouseholderQR.h
@@ -17,6 +17,9 @@ namespace internal {
template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >
: traits<_MatrixType>
{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -46,20 +49,19 @@ template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >
* \sa MatrixBase::colPivHouseholderQr()
*/
template<typename _MatrixType> class ColPivHouseholderQR
+ : public SolverBase<ColPivHouseholderQR<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<ColPivHouseholderQR> Base;
+ friend class SolverBase<ColPivHouseholderQR>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(ColPivHouseholderQR)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- // FIXME should be int
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
@@ -156,6 +158,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
computeInPlace();
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
*
@@ -172,11 +175,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
template<typename Rhs>
inline const Solve<ColPivHouseholderQR, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
- return Solve<ColPivHouseholderQR, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
HouseholderSequenceType householderQ() const;
HouseholderSequenceType matrixQ() const
@@ -402,7 +402,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
RealScalar maxPivot() const { return m_maxpivot; }
- /** \brief Reports whether the QR factorization was succesful.
+ /** \brief Reports whether the QR factorization was successful.
*
* \note This function always returns \c Success. It is provided for compatibility
* with other factorization routines.
@@ -417,6 +417,9 @@ template<typename _MatrixType> class ColPivHouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -583,8 +586,6 @@ template<typename _MatrixType>
template<typename RhsType, typename DstType>
void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
-
const Index nonzero_pivots = nonzeroPivots();
if(nonzero_pivots == 0)
@@ -595,11 +596,7 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &
typename RhsType::PlainObject c(rhs);
- // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
- c.applyOnTheLeft(householderSequence(m_qr, m_hCoeffs)
- .setLength(nonzero_pivots)
- .transpose()
- );
+ c.applyOnTheLeft(householderQ().setLength(nonzero_pivots).adjoint() );
m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
@@ -608,6 +605,31 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &
for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i);
for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero();
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void ColPivHouseholderQR<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index nonzero_pivots = nonzeroPivots();
+
+ if(nonzero_pivots == 0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename RhsType::PlainObject c(m_colsPermutation.transpose()*rhs);
+
+ m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(nonzero_pivots));
+
+ dst.topRows(nonzero_pivots) = c.topRows(nonzero_pivots);
+ dst.bottomRows(rows()-nonzero_pivots).setZero();
+
+ dst.applyOnTheLeft(householderQ().setLength(nonzero_pivots).template conjugateIf<!Conjugate>() );
+}
#endif
namespace internal {
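Deriving ColPivHouseholderQR from SolverBase and adding _solve_impl_transposed is what enables solving against the transposed or adjoint matrix directly from the factorization of A, without forming A^T. Usage, assuming the SolverBase API introduced by this import (the Eigen 3.4 line):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);

  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
  Eigen::VectorXd x  = qr.solve(b);              // A   x = b
  Eigen::VectorXd xt = qr.transpose().solve(b);  // A^T x = b, new path
  Eigen::VectorXd xa = qr.adjoint().solve(b);    // A^* x = b, new path

  std::cout << (A.transpose() * xt - b).norm() << "\n";  // ~0
  return 0;
}

The Conjugate template flag in _solve_impl_transposed is what distinguishes the transpose() and adjoint() cases at compile time.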
diff --git a/examples/ThirdPartyLibs/Eigen/src/QR/CompleteOrthogonalDecomposition.h b/examples/ThirdPartyLibs/Eigen/src/QR/CompleteOrthogonalDecomposition.h
index 13b61fcdb..486d3373a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/QR/CompleteOrthogonalDecomposition.h
+++ b/examples/ThirdPartyLibs/Eigen/src/QR/CompleteOrthogonalDecomposition.h
@@ -16,6 +16,9 @@ namespace internal {
template <typename _MatrixType>
struct traits<CompleteOrthogonalDecomposition<_MatrixType> >
: traits<_MatrixType> {
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -44,19 +47,21 @@ struct traits<CompleteOrthogonalDecomposition<_MatrixType> >
*
* \sa MatrixBase::completeOrthogonalDecomposition()
*/
-template <typename _MatrixType>
-class CompleteOrthogonalDecomposition {
+template <typename _MatrixType> class CompleteOrthogonalDecomposition
+ : public SolverBase<CompleteOrthogonalDecomposition<_MatrixType> >
+{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<CompleteOrthogonalDecomposition> Base;
+
+ template<typename Derived>
+ friend struct internal::solve_assertion;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(CompleteOrthogonalDecomposition)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime>
PermutationType;
@@ -131,9 +136,9 @@ class CompleteOrthogonalDecomposition {
m_temp(matrix.cols())
{
computeInPlace();
- }
-
+ }
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method computes the minimum-norm solution X to a least squares
* problem \f[\mathrm{minimize} \|A X - B\|, \f] where \b A is the matrix of
* which \c *this is the complete orthogonal decomposition.
@@ -145,11 +150,8 @@ class CompleteOrthogonalDecomposition {
*/
template <typename Rhs>
inline const Solve<CompleteOrthogonalDecomposition, Rhs> solve(
- const MatrixBase<Rhs>& b) const {
- eigen_assert(m_cpqr.m_isInitialized &&
- "CompleteOrthogonalDecomposition is not initialized.");
- return Solve<CompleteOrthogonalDecomposition, Rhs>(*this, b.derived());
- }
+ const MatrixBase<Rhs>& b) const;
+ #endif
HouseholderSequenceType householderQ(void) const;
HouseholderSequenceType matrixQ(void) const { return m_cpqr.householderQ(); }
@@ -158,8 +160,8 @@ class CompleteOrthogonalDecomposition {
*/
MatrixType matrixZ() const {
MatrixType Z = MatrixType::Identity(m_cpqr.cols(), m_cpqr.cols());
- applyZAdjointOnTheLeftInPlace(Z);
- return Z.adjoint();
+ applyZOnTheLeftInPlace<false>(Z);
+ return Z;
}
/** \returns a reference to the matrix where the complete orthogonal
@@ -275,6 +277,7 @@ class CompleteOrthogonalDecomposition {
*/
inline const Inverse<CompleteOrthogonalDecomposition> pseudoInverse() const
{
+ eigen_assert(m_cpqr.m_isInitialized && "CompleteOrthogonalDecomposition is not initialized.");
return Inverse<CompleteOrthogonalDecomposition>(*this);
}
@@ -353,7 +356,7 @@ class CompleteOrthogonalDecomposition {
inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }
/** \brief Reports whether the complete orthogonal decomposition was
- * succesful.
+ * successful.
*
* \note This function always returns \c Success. It is provided for
* compatibility
@@ -368,6 +371,9 @@ class CompleteOrthogonalDecomposition {
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename RhsType, typename DstType>
void _solve_impl(const RhsType& rhs, DstType& dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -375,8 +381,22 @@ class CompleteOrthogonalDecomposition {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ eigen_assert(m_cpqr.m_isInitialized && "CompleteOrthogonalDecomposition is not initialized.");
+ eigen_assert((Transpose_?derived().cols():derived().rows())==b.rows() && "CompleteOrthogonalDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+ }
+
void computeInPlace();
+ /** Overwrites \b rhs with \f$ \mathbf{Z} * \mathbf{rhs} \f$ or
+ * \f$ \mathbf{\overline Z} * \mathbf{rhs} \f$ if \c Conjugate
+ * is set to \c true.
+ */
+ template <bool Conjugate, typename Rhs>
+ void applyZOnTheLeftInPlace(Rhs& rhs) const;
+
/** Overwrites \b rhs with \f$ \mathbf{Z}^* * \mathbf{rhs} \f$.
*/
template <typename Rhs>
@@ -452,7 +472,7 @@ void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()
// Apply Z(k) to the first k rows of X_k
m_cpqr.m_qr.topRightCorner(k, cols - rank + 1)
.applyHouseholderOnTheRight(
- m_cpqr.m_qr.row(k).tail(cols - rank).transpose(), m_zCoeffs(k),
+ m_cpqr.m_qr.row(k).tail(cols - rank).adjoint(), m_zCoeffs(k),
&m_temp(0));
}
if (k != rank - 1) {
@@ -465,13 +485,35 @@ void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()
}
template <typename MatrixType>
+template <bool Conjugate, typename Rhs>
+void CompleteOrthogonalDecomposition<MatrixType>::applyZOnTheLeftInPlace(
+ Rhs& rhs) const {
+ const Index cols = this->cols();
+ const Index nrhs = rhs.cols();
+ const Index rank = this->rank();
+ Matrix<typename Rhs::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
+ for (Index k = rank-1; k >= 0; --k) {
+ if (k != rank - 1) {
+ rhs.row(k).swap(rhs.row(rank - 1));
+ }
+ rhs.middleRows(rank - 1, cols - rank + 1)
+ .applyHouseholderOnTheLeft(
+ matrixQTZ().row(k).tail(cols - rank).transpose().template conjugateIf<!Conjugate>(), zCoeffs().template conjugateIf<Conjugate>()(k),
+ &temp(0));
+ if (k != rank - 1) {
+ rhs.row(k).swap(rhs.row(rank - 1));
+ }
+ }
+}
+
+template <typename MatrixType>
template <typename Rhs>
void CompleteOrthogonalDecomposition<MatrixType>::applyZAdjointOnTheLeftInPlace(
Rhs& rhs) const {
const Index cols = this->cols();
const Index nrhs = rhs.cols();
const Index rank = this->rank();
- Matrix<typename MatrixType::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
+ Matrix<typename Rhs::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
for (Index k = 0; k < rank; ++k) {
if (k != rank - 1) {
rhs.row(k).swap(rhs.row(rank - 1));
@@ -491,8 +533,6 @@ template <typename _MatrixType>
template <typename RhsType, typename DstType>
void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
const RhsType& rhs, DstType& dst) const {
- eigen_assert(rhs.rows() == this->rows());
-
const Index rank = this->rank();
if (rank == 0) {
dst.setZero();
@@ -500,11 +540,8 @@ void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
}
// Compute c = Q^* * rhs
- // Note that the matrix Q = H_0^* H_1^*... so its inverse is
- // Q^* = (H_0 H_1 ...)^T
typename RhsType::PlainObject c(rhs);
- c.applyOnTheLeft(
- householderSequence(matrixQTZ(), hCoeffs()).setLength(rank).transpose());
+ c.applyOnTheLeft(matrixQ().setLength(rank).adjoint());
// Solve T z = c(1:rank, :)
dst.topRows(rank) = matrixT()
@@ -523,10 +560,45 @@ void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
// Undo permutation to get x = P^{-1} * y.
dst = colsPermutation() * dst;
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index rank = this->rank();
+
+ if (rank == 0) {
+ dst.setZero();
+ return;
+ }
+
+ typename RhsType::PlainObject c(colsPermutation().transpose()*rhs);
+
+ if (rank < cols()) {
+ applyZOnTheLeftInPlace<!Conjugate>(c);
+ }
+
+ matrixT().topLeftCorner(rank, rank)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(rank));
+
+ dst.topRows(rank) = c.topRows(rank);
+ dst.bottomRows(rows()-rank).setZero();
+
+ dst.applyOnTheLeft(householderQ().setLength(rank).template conjugateIf<!Conjugate>() );
+}
#endif
namespace internal {
+template<typename MatrixType>
+struct traits<Inverse<CompleteOrthogonalDecomposition<MatrixType> > >
+ : traits<typename Transpose<typename MatrixType::PlainObject>::PlainObject>
+{
+ enum { Flags = 0 };
+};
+
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename CompleteOrthogonalDecomposition<MatrixType>::Scalar>, Dense2Dense>
{
@@ -534,7 +606,8 @@ struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType
typedef Inverse<CodType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename CodType::Scalar> &)
{
- dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows()));
+ typedef Matrix<typename CodType::Scalar, CodType::RowsAtCompileTime, CodType::RowsAtCompileTime, 0, CodType::MaxRowsAtCompileTime, CodType::MaxRowsAtCompileTime> IdentityMatrixType;
+ dst = src.nestedExpression().solve(IdentityMatrixType::Identity(src.cols(), src.cols()));
}
};
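Besides the SolverBase migration, this file corrects the Inverse<CompleteOrthogonalDecomposition> assignment to build the identity from src.cols() with a properly typed matrix, so the pseudo-inverse of a rectangular m-by-n matrix comes out n-by-m. A small check against documented API:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);       // 5x3, full column rank a.s.
  Eigen::MatrixXd pinvA =
      A.completeOrthogonalDecomposition().pseudoInverse();  // 3x5
  std::cout << pinvA.rows() << "x" << pinvA.cols() << "\n";
  std::cout << (pinvA * A - Eigen::MatrixXd::Identity(3, 3)).norm() << "\n";  // ~0
  return 0;
}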
diff --git a/examples/ThirdPartyLibs/Eigen/src/QR/FullPivHouseholderQR.h b/examples/ThirdPartyLibs/Eigen/src/QR/FullPivHouseholderQR.h
index c31e47cc4..d0664a1d8 100644
--- a/examples/ThirdPartyLibs/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/examples/ThirdPartyLibs/Eigen/src/QR/FullPivHouseholderQR.h
@@ -18,6 +18,9 @@ namespace internal {
template<typename _MatrixType> struct traits<FullPivHouseholderQR<_MatrixType> >
: traits<_MatrixType>
{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
enum { Flags = 0 };
};
@@ -55,20 +58,19 @@ struct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
* \sa MatrixBase::fullPivHouseholderQr()
*/
template<typename _MatrixType> class FullPivHouseholderQR
+ : public SolverBase<FullPivHouseholderQR<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<FullPivHouseholderQR> Base;
+ friend class SolverBase<FullPivHouseholderQR>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivHouseholderQR)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- // FIXME should be int
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<StorageIndex, 1,
@@ -156,6 +158,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
computeInPlace();
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* \c *this is the QR decomposition.
*
@@ -173,11 +176,8 @@ template<typename _MatrixType> class FullPivHouseholderQR
*/
template<typename Rhs>
inline const Solve<FullPivHouseholderQR, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
- return Solve<FullPivHouseholderQR, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** \returns Expression object representing the matrix Q
*/
@@ -396,6 +396,9 @@ template<typename _MatrixType> class FullPivHouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -498,15 +501,15 @@ void FullPivHouseholderQR<MatrixType>::computeInPlace()
m_nonzero_pivots = k;
for(Index i = k; i < size; i++)
{
- m_rows_transpositions.coeffRef(i) = i;
- m_cols_transpositions.coeffRef(i) = i;
+ m_rows_transpositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
+ m_cols_transpositions.coeffRef(i) = internal::convert_index<StorageIndex>(i);
m_hCoeffs.coeffRef(i) = Scalar(0);
}
break;
}
- m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
- m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
+ m_rows_transpositions.coeffRef(k) = internal::convert_index<StorageIndex>(row_of_biggest_in_corner);
+ m_cols_transpositions.coeffRef(k) = internal::convert_index<StorageIndex>(col_of_biggest_in_corner);
if(k != row_of_biggest_in_corner) {
m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));
++number_of_transpositions;
@@ -540,7 +543,6 @@ template<typename _MatrixType>
template<typename RhsType, typename DstType>
void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
const Index l_rank = rank();
// FIXME introduce nonzeroPivots() and use it here. and more generally,
@@ -553,7 +555,7 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType
typename RhsType::PlainObject c(rhs);
- Matrix<Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols());
+ Matrix<typename RhsType::Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols());
for (Index k = 0; k < l_rank; ++k)
{
Index remainingSize = rows()-k;
@@ -570,6 +572,42 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType
for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i);
for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero();
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void FullPivHouseholderQR<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index l_rank = rank();
+
+ if(l_rank == 0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename RhsType::PlainObject c(m_cols_permutation.transpose()*rhs);
+
+ m_qr.topLeftCorner(l_rank, l_rank)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(l_rank));
+
+ dst.topRows(l_rank) = c.topRows(l_rank);
+ dst.bottomRows(rows()-l_rank).setZero();
+
+ Matrix<Scalar, 1, DstType::ColsAtCompileTime> temp(dst.cols());
+ const Index size = (std::min)(rows(), cols());
+ for (Index k = size-1; k >= 0; --k)
+ {
+ Index remainingSize = rows()-k;
+
+ dst.bottomRightCorner(remainingSize, dst.cols())
+ .applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingSize-1).template conjugateIf<!Conjugate>(),
+ m_hCoeffs.template conjugateIf<Conjugate>().coeff(k), &temp.coeffRef(0));
+
+ dst.row(k).swap(dst.row(m_rows_transpositions.coeff(k)));
+ }
+}
#endif
namespace internal {
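The new transposed-solve paths here and above lean on conjugateIf<bool>(), which statically selects between an expression and its conjugate so that one template body serves both transpose() and adjoint() solves. A self-contained demonstration of the member this import provides:

#include <Eigen/Dense>
#include <iostream>

template <bool Conjugate>
Eigen::VectorXcd apply(const Eigen::MatrixXcd& M, const Eigen::VectorXcd& v) {
  // conjugateIf<true>() conjugates, conjugateIf<false>() is a no-op view;
  // the branch is resolved at compile time, as in _solve_impl_transposed.
  return M.template conjugateIf<Conjugate>() * v;
}

int main() {
  Eigen::MatrixXcd M = Eigen::MatrixXcd::Random(3, 3);
  Eigen::VectorXcd v = Eigen::VectorXcd::Random(3);
  std::cout << (apply<true>(M, v) - M.conjugate() * v).norm() << "\n";  // ~0
  std::cout << (apply<false>(M, v) - M * v).norm() << "\n";             // ~0
  return 0;
}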
diff --git a/examples/ThirdPartyLibs/Eigen/src/QR/HouseholderQR.h b/examples/ThirdPartyLibs/Eigen/src/QR/HouseholderQR.h
index 762b21c36..801739fbd 100644
--- a/examples/ThirdPartyLibs/Eigen/src/QR/HouseholderQR.h
+++ b/examples/ThirdPartyLibs/Eigen/src/QR/HouseholderQR.h
@@ -14,6 +14,18 @@
namespace Eigen {
+namespace internal {
+template<typename _MatrixType> struct traits<HouseholderQR<_MatrixType> >
+ : traits<_MatrixType>
+{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+};
+
+} // end namespace internal
+
/** \ingroup QR_Module
*
*
@@ -42,20 +54,19 @@ namespace Eigen {
* \sa MatrixBase::householderQr()
*/
template<typename _MatrixType> class HouseholderQR
+ : public SolverBase<HouseholderQR<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
+ typedef SolverBase<HouseholderQR> Base;
+ friend class SolverBase<HouseholderQR>;
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(HouseholderQR)
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- // FIXME should be int
- typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
@@ -121,6 +132,7 @@ template<typename _MatrixType> class HouseholderQR
computeInPlace();
}
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
*
@@ -137,11 +149,8 @@ template<typename _MatrixType> class HouseholderQR
*/
template<typename Rhs>
inline const Solve<HouseholderQR, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
- return Solve<HouseholderQR, Rhs>(*this, b.derived());
- }
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
/** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
*
@@ -214,6 +223,9 @@ template<typename _MatrixType> class HouseholderQR
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
@@ -291,7 +303,7 @@ template<typename MatrixQR, typename HCoeffs,
bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>
struct householder_qr_inplace_blocked
{
- // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h
+ // This is specialized for LAPACK-supported Scalar types in HouseholderQR_LAPACKE.h
static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32,
typename MatrixQR::Scalar* tempData = 0)
{
@@ -349,15 +361,10 @@ template<typename RhsType, typename DstType>
void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
const Index rank = (std::min)(rows(), cols());
- eigen_assert(rhs.rows() == rows());
typename RhsType::PlainObject c(rhs);
- // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
- c.applyOnTheLeft(householderSequence(
- m_qr.leftCols(rank),
- m_hCoeffs.head(rank)).transpose()
- );
+ c.applyOnTheLeft(householderQ().setLength(rank).adjoint() );
m_qr.topLeftCorner(rank, rank)
.template triangularView<Upper>()
@@ -366,6 +373,25 @@ void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) c
dst.topRows(rank) = c.topRows(rank);
dst.bottomRows(cols()-rank).setZero();
}
+
+template<typename _MatrixType>
+template<bool Conjugate, typename RhsType, typename DstType>
+void HouseholderQR<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ const Index rank = (std::min)(rows(), cols());
+
+ typename RhsType::PlainObject c(rhs);
+
+ m_qr.topLeftCorner(rank, rank)
+ .template triangularView<Upper>()
+ .transpose().template conjugateIf<Conjugate>()
+ .solveInPlace(c.topRows(rank));
+
+ dst.topRows(rank) = c.topRows(rank);
+ dst.bottomRows(rows()-rank).setZero();
+
+ dst.applyOnTheLeft(householderQ().setLength(rank).template conjugateIf<!Conjugate>() );
+}
#endif
/** Performs the QR factorization of the given matrix \a matrix. The result of
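HouseholderQR's forward solve now forms c = Q^* b via householderQ().setLength(rank).adjoint() rather than a hand-rolled householderSequence(...).transpose(), which is also correct for complex scalars. From the caller's side nothing changes; a least-squares solve still reads:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);  // overdetermined system
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);
  Eigen::VectorXd x = A.householderQr().solve(b);
  // x satisfies the normal equations: A^T (A x - b) = 0 up to round-off.
  std::cout << (A.transpose() * (A * x - b)).norm() << "\n";
  return 0;
}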
diff --git a/examples/ThirdPartyLibs/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/examples/ThirdPartyLibs/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index 953d57c9d..013c7ae7a 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -74,13 +74,35 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
};
public:
SPQR()
- : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+ : m_analysisIsOk(false),
+ m_factorizationIsOk(false),
+ m_isRUpToDate(false),
+ m_ordering(SPQR_ORDERING_DEFAULT),
+ m_allow_tol(SPQR_DEFAULT_TOL),
+ m_tolerance (NumTraits<Scalar>::epsilon()),
+ m_cR(0),
+ m_E(0),
+ m_H(0),
+ m_HPinv(0),
+ m_HTau(0),
+ m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
}
explicit SPQR(const _MatrixType& matrix)
- : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+ : m_analysisIsOk(false),
+ m_factorizationIsOk(false),
+ m_isRUpToDate(false),
+ m_ordering(SPQR_ORDERING_DEFAULT),
+ m_allow_tol(SPQR_DEFAULT_TOL),
+ m_tolerance (NumTraits<Scalar>::epsilon()),
+ m_cR(0),
+ m_E(0),
+ m_H(0),
+ m_HPinv(0),
+ m_HTau(0),
+ m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
compute(matrix);
@@ -220,7 +242,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the sparse QR can not be computed
*/
ComputationInfo info() const
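The SPQR constructors now null-initialize every cholmod pointer they may later free (m_cR, m_E, m_H, m_HPinv, m_HTau) along with the status flags, which presumably makes destroying or querying an SPQR object that never ran compute() well-defined. The general pattern, as a sketch on a hypothetical class:

#include <cstdlib>

class SolverHandle {
public:
  SolverHandle() : m_data(0) {}   // was: pointer left indeterminate
  void compute(std::size_t n) { m_data = std::malloc(n); }
  ~SolverHandle() { std::free(m_data); }  // safe: free(NULL) is a no-op
private:
  void* m_data;
};

int main() { SolverHandle h; return 0; }  // destructor runs without compute()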
diff --git a/examples/ThirdPartyLibs/Eigen/src/SVD/BDCSVD.h b/examples/ThirdPartyLibs/Eigen/src/SVD/BDCSVD.h
index b8c41c560..a76a8dd04 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SVD/BDCSVD.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SVD/BDCSVD.h
@@ -11,7 +11,7 @@
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
// Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
-// Copyright (C) 2014-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -22,6 +22,15 @@
// #define EIGEN_BDCSVD_DEBUG_VERBOSE
// #define EIGEN_BDCSVD_SANITY_CHECKS
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+#undef eigen_internal_assert
+#define eigen_internal_assert(X) assert(X);
+#endif
+
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+#include <iostream>
+#endif
+
namespace Eigen {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
@@ -34,6 +43,7 @@ namespace internal {
template<typename _MatrixType>
struct traits<BDCSVD<_MatrixType> >
+ : traits<_MatrixType>
{
typedef _MatrixType MatrixType;
};
@@ -57,7 +67,7 @@ struct traits<BDCSVD<_MatrixType> >
* recommended and can be several orders of magnitude faster.
*
* \warning this algorithm is unlikely to provide accurate results when compiled with unsafe math optimizations.
- * For instance, this concerns Intel's compiler (ICC), which perfroms such optimization by default unless
+ * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
* you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
* significantly degrade the accuracy.
*
@@ -105,7 +115,7 @@ public:
* The default constructor is useful in cases in which the user intends to
* perform decompositions via BDCSVD::compute(const MatrixType&).
*/
- BDCSVD() : m_algoswap(16), m_numIters(0)
+ BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0)
{}
@@ -166,7 +176,7 @@ public:
void setSwitchSize(int s)
{
- eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
+ eigen_assert(s>=3 && "BDCSVD the size of the algo switch has to be at least 3.");
m_algoswap = s;
}
@@ -202,6 +212,7 @@ protected:
using Base::m_computeThinV;
using Base::m_matrixU;
using Base::m_matrixV;
+ using Base::m_info;
using Base::m_isInitialized;
using Base::m_nonzeroSingularValues;
@@ -212,7 +223,7 @@ public:
// Method to allocate and initialize matrix and attributes
template<typename MatrixType>
-void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
m_isTranspose = (cols > rows);
@@ -250,16 +261,25 @@ BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsign
{
// FIXME this line involves temporaries
JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
- if(computeU()) m_matrixU = jsvd.matrixU();
- if(computeV()) m_matrixV = jsvd.matrixV();
- m_singularValues = jsvd.singularValues();
- m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
m_isInitialized = true;
+ m_info = jsvd.info();
+ if (m_info == Success || m_info == NoConvergence) {
+ if(computeU()) m_matrixU = jsvd.matrixU();
+ if(computeV()) m_matrixV = jsvd.matrixV();
+ m_singularValues = jsvd.singularValues();
+ m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
+ }
return *this;
}
//**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
- RealScalar scale = matrix.cwiseAbs().maxCoeff();
+ RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
+ if (!(numext::isfinite)(scale)) {
+ m_isInitialized = true;
+ m_info = InvalidInput;
+ return *this;
+ }
+
if(scale==Literal(0)) scale = Literal(1);
MatrixX copy;
if (m_isTranspose) copy = matrix.adjoint()/scale;
@@ -276,7 +296,11 @@ BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsign
m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
m_computed.template bottomRows<1>().setZero();
divide(0, m_diagSize - 1, 0, 0, 0);
-
+ if (m_info != Success && m_info != NoConvergence) {
+ m_isInitialized = true;
+ return *this;
+ }
+
//**** step 3 - Copy singular values and vectors
for (int i=0; i<m_diagSize; i++)
{
@@ -384,11 +408,11 @@ void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, co
//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
// lastCol + 1 - firstCol is the size of the submatrix.
//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
-//@param firstRowW : Same as firstRowW with the column.
+//@param firstColW : Same as firstRowW with the column.
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
// to become the first column (*coeff) and to shift all the other columns to the right. There are more details in the reference paper.
template<typename MatrixType>
-void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)
+void BDCSVD<MatrixType>::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
// requires rows = cols + 1;
using std::pow;
@@ -408,6 +432,8 @@ void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
{
// FIXME this line involves temporaries
JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
+ m_info = b.info();
+ if (m_info != Success && m_info != NoConvergence) return;
if (m_compU)
m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
else
@@ -427,7 +453,9 @@ void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
// and the divide of the right submatrix reads one column of the left submatrix. That's why we need to treat the
// right submatrix before the left one.
divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
+ if (m_info != Success && m_info != NoConvergence) return;
divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
+ if (m_info != Success && m_info != NoConvergence) return;
if (m_compU)
{
@@ -568,7 +596,7 @@ void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType>
-void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
+void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
using std::abs;
@@ -591,7 +619,7 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
// but others are interleaved and we must ignore them at this stage.
// To this end, let's compute a permutation skipping them:
Index actual_n = n;
- while(actual_n>1 && diag(actual_n-1)==Literal(0)) --actual_n;
+ while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); }
Index m = 0; // size of the deflated problem
for(Index k=0;k<actual_n;++k)
if(abs(col0(k))>considerZero)
@@ -618,13 +646,11 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
std::cout << " shift: " << shifts.transpose() << "\n";
{
- Index actual_n = n;
- while(actual_n>1 && abs(col0(actual_n-1))<considerZero) --actual_n;
std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
+ assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all());
std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
- std::cout << " check3 (>0) : " << ((diag.segment(1,actual_n-1)-singVals.head(actual_n-1).array()) / singVals.head(actual_n-1).array()).transpose() << "\n\n\n";
- std::cout << " check4 (>0) : " << ((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).transpose() << "\n\n\n";
+ assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all());
}
#endif
@@ -652,13 +678,13 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
- assert(U.allFinite());
- assert(V.allFinite());
- assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 1e-14 * n);
- assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 1e-14 * n);
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
+ assert(U.allFinite());
+ assert(V.allFinite());
+// assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
+// assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
#endif
// Because of deflation, the singular values might not be completely sorted.
@@ -673,6 +699,15 @@ void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec
if(m_compV) V.col(i).swap(V.col(i+1));
}
}
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ {
+ bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all();
+ if(!singular_values_sorted)
+ std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n";
+ assert(singular_values_sorted);
+ }
+#endif
// Reverse order so that singular values are in increasing order
// Because of deflation, the zero singular values are already at the end
@@ -696,7 +731,9 @@ typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar
for(Index i=0; i<m; ++i)
{
Index j = perm(i);
- res += numext::abs2(col0(j)) / ((diagShifted(j) - mu) * (diag(j) + shift + mu));
+ // The following expression could be rewritten to involve only a single division,
+ // but this would make the expression more sensitive to overflow.
+ res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
}
return res;
@@ -708,9 +745,12 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
{
using std::abs;
using std::swap;
+ using std::sqrt;
Index n = col0.size();
Index actual_n = n;
+ // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
+ // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
for (Index k = 0; k < n; ++k)
@@ -732,7 +772,9 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
right = (diag(actual_n-1) + col0.matrix().norm());
else
{
- // Skip deflated singular values
+ // Skip deflated singular values,
+ // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
+ // This should be equivalent to using perm[]
Index l = k+1;
while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
right = diag(l);
@@ -742,25 +784,43 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
RealScalar mid = left + (right-left) / Literal(2);
RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- std::cout << right-left << "\n";
- std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, diag-left, left) << " " << secularEq(mid-right, col0, diag, perm, diag-right, right) << "\n";
- std::cout << " = " << secularEq(0.1*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.2*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.3*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.4*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.49*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.5*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.51*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.6*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.7*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.8*(left+right), col0, diag, perm, diag, 0)
- << " " << secularEq(0.9*(left+right), col0, diag, perm, diag, 0) << "\n";
+ std::cout << "right-left = " << right-left << "\n";
+// std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, ArrayXr(diag-left), left)
+// << " " << secularEq(mid-right, col0, diag, perm, ArrayXr(diag-right), right) << "\n";
+ std::cout << " = " << secularEq(left+RealScalar(0.000001)*(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.1) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.2) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.3) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.4) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.49) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.5) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.51) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.6) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.7) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.8) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.9) *(right-left), col0, diag, perm, diag, 0)
+ << " " << secularEq(left+RealScalar(0.999999)*(right-left), col0, diag, perm, diag, 0) << "\n";
#endif
RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
// measure everything relative to shift
Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
diagShifted = diag - shift;
+
+ if(k!=actual_n-1)
+ {
+ // check that after the shift, f(mid) is still negative:
+ RealScalar midShifted = (right - left) / RealScalar(2);
+ if(shift==right)
+ midShifted = -midShifted;
+ RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
+ if(fMidShifted>0)
+ {
+ // fMid was erroneous, fix it:
+ shift = fMidShifted > Literal(0) ? left : right;
+ diagShifted = diag - shift;
+ }
+ }
// initial guess
RealScalar muPrev, muCur;
@@ -797,13 +857,16 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
// And find mu such that f(mu)==0:
RealScalar muZero = -a/b;
RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
+
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert((numext::isfinite)(fZero));
+#endif
muPrev = muCur;
fPrev = fCur;
muCur = muZero;
fCur = fZero;
-
if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
if (abs(fCur)>abs(fPrev)) useBisection = true;
@@ -818,54 +881,100 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& d
RealScalar leftShifted, rightShifted;
if (shift == left)
{
- leftShifted = (std::numeric_limits<RealScalar>::min)();
+ // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
+ // the factor 2 is to be more conservative
+ leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
+
+ // check that we did it right:
+ eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
// I don't understand why the case k==0 would be special there:
- // if (k == 0) rightShifted = right - left; else
- rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.6)); // theoretically we can take 0.5, but let's be safe
+ // if (k == 0) rightShifted = right - left; else
+ rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
}
else
{
- leftShifted = -(right - left) * RealScalar(0.6);
- rightShifted = -(std::numeric_limits<RealScalar>::min)();
+ leftShifted = -(right - left) * RealScalar(0.51);
+ if(k+1<n)
+ rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
+ else
+ rightShifted = -(std::numeric_limits<RealScalar>::min)();
}
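// A minimal sketch of the overflow-safe left bracket endpoint chosen above,
// assuming double precision. Requiring mu >= max(DBL_MIN, 2*|z_k|/sqrt(DBL_MAX))
// keeps each factor of (z_k/mu)*(z_k/(d_k+shift+mu)) below sqrt(DBL_MAX)/2 in
// magnitude, so their product cannot overflow.
#include <algorithm>
#include <cmath>
#include <limits>

double safeLeftBracket(double zk)
{
  const double tiny = (std::numeric_limits<double>::min)();
  const double huge = (std::numeric_limits<double>::max)();
  // the factor 2 is only a safety margin, mirroring the comment in the patch
  return (std::max)(tiny, 2.0 * std::abs(zk) / std::sqrt(huge));
}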
-
+
RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
+ eigen_internal_assert(fLeft<Literal(0));
-#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_DEBUG_VERBOSE
+#if defined EIGEN_BDCSVD_DEBUG_VERBOSE || defined EIGEN_BDCSVD_SANITY_CHECKS || defined EIGEN_INTERNAL_DEBUGGING
RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
#endif
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(!(numext::isfinite)(fLeft))
+ std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n";
+ assert((numext::isfinite)(fLeft));
+
+ if(!(numext::isfinite)(fRight))
+ std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n";
+ // assert((numext::isfinite)(fRight));
+#endif
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(!(fLeft * fRight<0))
{
- std::cout << "fLeft: " << leftShifted << " - " << diagShifted.head(10).transpose() << "\n ; " << bool(left==shift) << " " << (left-shift) << "\n";
- std::cout << k << " : " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; " << left << " - " << right << " -> " << leftShifted << " " << rightShifted << " shift=" << shift << "\n";
+ std::cout << "f(leftShifted) using leftShifted=" << leftShifted << " ; diagShifted(1:10):" << diagShifted.head(10).transpose() << "\n ; "
+ << "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n";
+ std::cout << "k=" << k << ", " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; "
+ << "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift
+ << " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift)
+ << " == " << secularEq(right, col0, diag, perm, diag, 0) << " == " << fRight << "\n";
}
#endif
eigen_internal_assert(fLeft * fRight < Literal(0));
-
- while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
+
+ if(fLeft<Literal(0))
{
- RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
- fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
- if (fLeft * fMid < Literal(0))
- {
- rightShifted = midShifted;
- }
- else
+ while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
{
- leftShifted = midShifted;
- fLeft = fMid;
+ RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
+ fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
+ eigen_internal_assert((numext::isfinite)(fMid));
+
+ if (fLeft * fMid < Literal(0))
+ {
+ rightShifted = midShifted;
+ }
+ else
+ {
+ leftShifted = midShifted;
+ fLeft = fMid;
+ }
}
+ muCur = (leftShifted + rightShifted) / Literal(2);
+ }
+ else
+ {
+ // We have a problem: shifting on the left or on the right gives either a positive or a negative value
+ // at the middle of [left,right]...
+ // Instead of aborting or entering an infinite loop,
+ // let's just use the middle as the estimated zero-crossing:
+ muCur = (right - left) * RealScalar(0.5);
+ if(shift == right)
+ muCur = -muCur;
}
-
- muCur = (leftShifted + rightShifted) / Literal(2);
}
singVals[k] = shift + muCur;
shifts[k] = shift;
mus[k] = muCur;
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ if(k+1<n)
+ std::cout << "found " << singVals[k] << " == " << shift << " + " << muCur << " from " << diag(k) << " .. " << diag(k+1) << "\n";
+#endif
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(k==0 || singVals[k]>=singVals[k-1]);
+ assert(singVals[k]>=diag(k));
+#endif
+
// perturb singular value slightly if it equals diagonal entry to avoid division by zero later
// (deflation is supposed to avoid this from happening)
// - this does not seem to be necessary anymore -
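// The hunk above wraps the bisection in a sign-check guard. A generic sketch,
// assuming f is continuous and changes sign on [lo, hi] with f(lo) < 0; like
// the patch, it falls back to the interval midpoint instead of aborting or
// looping forever when that assumption turns out to be violated.
#include <algorithm>
#include <cmath>
#include <limits>

template <typename F>
double guardedBisect(F f, double lo, double hi)
{
  double fLo = f(lo);
  if (!(fLo < 0.0))
    return (lo + hi) / 2.0;  // bracketing assumption failed: best-effort midpoint
  const double eps = std::numeric_limits<double>::epsilon();
  while (hi - lo > 2.0 * eps * (std::max)(std::abs(lo), std::abs(hi)))
  {
    const double mid  = (lo + hi) / 2.0;
    const double fMid = f(mid);
    if (fLo * fMid < 0.0)
      hi = mid;                  // sign change lies in [lo, mid]
    else
    { lo = mid; fLo = fMid; }    // sign change lies in [mid, hi]
  }
  return (lo + hi) / 2.0;
}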
@@ -889,7 +998,7 @@ void BDCSVD<MatrixType>::perturbCol0
zhat.setZero();
return;
}
- Index last = perm(m-1);
+ Index lastIdx = perm(m-1);
// The offset permits to skip deflated entries while computing zhat
for (Index k = 0; k < n; ++k)
{
@@ -899,27 +1008,58 @@ void BDCSVD<MatrixType>::perturbCol0
{
// see equation (3.6)
RealScalar dk = diag(k);
- RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk));
+ RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk));
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(prod<0) {
+ std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n";
+ std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n";
+ std::cout << " = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) << "\n";
+ }
+ assert(prod>=0);
+#endif
for(Index l = 0; l<m; ++l)
{
Index i = perm(l);
if(i!=k)
{
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(i>=k && (l==0 || l-1>=m))
+ {
+ std::cout << "Error in perturbCol0\n";
+ std::cout << " " << k << "/" << n << " " << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " " << "\n";
+ std::cout << " " <<diag(i) << "\n";
+ Index j = (i<k /*|| l==0*/) ? i : perm(l-1);
+ std::cout << " " << "j=" << j << "\n";
+ }
+#endif
Index j = i<k ? i : perm(l-1);
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ if(!(dk!=Literal(0) || diag(i)!=Literal(0)))
+ {
+ std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n";
+ }
+ assert(dk!=Literal(0) || diag(i)!=Literal(0));
+#endif
prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(prod>=0);
+#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- if(i!=k && std::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
+ if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
<< ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
#endif
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(last) + dk) << " * " << mus(last) + shifts(last) << " - " << dk << "\n";
+ std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n";
#endif
RealScalar tmp = sqrt(prod);
- zhat(k) = col0(k) > Literal(0) ? tmp : -tmp;
+#ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert((numext::isfinite)(tmp));
+#endif
+ zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
}
}
}
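// The essential final step of perturbCol0 in isolation: equation (3.6) gives
// |zhat_k| as the square root of an accumulated non-negative product of
// ratios, and the sign of the original col0(k) is then reattached. A hedged
// sketch with hypothetical plain-double inputs:
#include <cmath>

double recoverZhat(double prod,   // accumulated product of ratios, >= 0
                   double col0k)  // original entry; only its sign is used
{
  const double tmp = std::sqrt(prod);  // magnitude per equation (3.6)
  return col0k > 0.0 ? tmp : -tmp;     // restore the sign of col0(k)
}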
@@ -972,7 +1112,7 @@ void BDCSVD<MatrixType>::computeSingVecs
// i >= 1, di almost null and zi non null.
// We use a rotation to zero out zi applied to the left of M
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size)
+void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
{
using std::abs;
using std::sqrt;
@@ -980,7 +1120,7 @@ void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index
Index start = firstCol + shift;
RealScalar c = m_computed(start, start);
RealScalar s = m_computed(start+i, start);
- RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
+ RealScalar r = numext::hypot(c,s);
if (r == Literal(0))
{
m_computed(start+i, start+i) = Literal(0);
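// numext::hypot(c,s) replaces sqrt(c^2 + s^2) because it stays finite when
// c or s is near the overflow/underflow limits. A standalone sketch of the
// same Givens construction using std::hypot:
#include <cmath>

struct Givens { double c, s, r; };

Givens makeGivensSketch(double c, double s)
{
  const double r = std::hypot(c, s);  // robust norm of (c, s)
  if (r == 0.0)
    return Givens{1.0, 0.0, 0.0};     // degenerate case: identity rotation
  return Givens{c / r, s / r, r};     // rotation zeroing out the second entry
}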
@@ -1001,7 +1141,7 @@ void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index
// We apply two rotations to have zj = 0;
// TODO deflation44 is still broken and not properly tested
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)
+void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
{
using std::abs;
using std::sqrt;
@@ -1028,7 +1168,7 @@ void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index fi
}
c/=r;
s/=r;
- m_computed(firstColm + i, firstColm) = r;
+ m_computed(firstColm + i, firstColm) = r;
m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
m_computed(firstColm + j, firstColm) = Literal(0);
@@ -1041,7 +1181,7 @@ void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index fi
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift)
+void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
using std::sqrt;
using std::abs;
@@ -1102,11 +1242,12 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "to be sorted: " << diag.transpose() << "\n\n";
+ std::cout << " : " << col0.transpose() << "\n\n";
#endif
{
// Check for total deflation
- // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting
- bool total_deflation = (col0.tail(length-1).array()<considerZero).all();
+ // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting.
+ const bool total_deflation = (col0.tail(length-1).array().abs()<considerZero).all();
// Sort the diagonal entries, since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.
// First, compute the respective permutation.
@@ -1192,7 +1333,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
- std::cout << "deflation 4.4 with i = " << i << " because " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*diag(i) << "\n";
+ std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*/*diag(i)*/maxDiag << "\n";
#endif
eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
@@ -1211,7 +1352,6 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
#endif
}//end deflation
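// The deflation 4.4 trigger above is a relative-gap test against the largest
// diagonal entry. A minimal sketch of the criterion:
#include <limits>

bool shouldDeflate44(double dPrev, double dCur, double maxDiag)
{
  // entries are sorted, so dCur - dPrev >= 0; deflate when the gap is
  // negligible relative to the largest diagonal entry
  return (dCur - dPrev) < std::numeric_limits<double>::epsilon() * maxDiag;
}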
-#ifndef EIGEN_CUDACC
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by Divide & Conquer algorithm
@@ -1224,7 +1364,6 @@ MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
{
return BDCSVD<PlainObject>(*this, computationOptions);
}
-#endif
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD.h b/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD.h
index 43488b1e0..9d95acdf6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD.h
@@ -112,12 +112,12 @@ public:
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
- : ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
- : MatrixType::Options
+ Options = MatrixType::Options
};
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
+
+ typedef typename internal::make_proper_matrix_type<
+ Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
+ >::type TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
@@ -202,13 +202,12 @@ public:
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
- : ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
- : MatrixType::Options
+ Options = MatrixType::Options
};
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
+ typedef typename internal::make_proper_matrix_type<
+ Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
+ >::type TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
@@ -303,8 +302,9 @@ public:
Options = MatrixType::Options
};
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
+ typedef typename internal::make_proper_matrix_type<
+ Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
+ >::type TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
@@ -425,6 +425,7 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
template<typename _MatrixType, int QRPreconditioner>
struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
+ : traits<_MatrixType>
{
typedef _MatrixType MatrixType;
};
@@ -584,6 +585,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_singularValues;
+ using Base::m_info;
using Base::m_isInitialized;
using Base::m_isAllocated;
using Base::m_usePrescribedThreshold;
@@ -610,7 +612,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
};
template<typename MatrixType, int QRPreconditioner>
-void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
@@ -624,6 +626,7 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
m_rows = rows;
m_cols = cols;
+ m_info = Success;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
@@ -673,7 +676,12 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
// Scaling factor to reduce over/under-flows
- RealScalar scale = matrix.cwiseAbs().maxCoeff();
+ RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
+ if (!(numext::isfinite)(scale)) {
+ m_isInitialized = true;
+ m_info = InvalidInput;
+ return *this;
+ }
if(scale==RealScalar(0)) scale = RealScalar(1);
/*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
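// With this change a non-finite input is detected up front (the NaN-propagating
// maxCoeff makes scale NaN) and reported through info() instead of producing an
// undefined result. A usage sketch against the public API:
#include <Eigen/Dense>
#include <iostream>
#include <limits>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  A(1, 1) = std::numeric_limits<double>::quiet_NaN();
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A);
  if (svd.info() != Eigen::Success)  // InvalidInput with this patch applied
    std::cout << "input contained NaN/Inf; SVD aborted early\n";
  return 0;
}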
diff --git a/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD_LAPACKE.h b/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD_LAPACKE.h
index 50272154f..ff0516f61 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD_LAPACKE.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SVD/JacobiSVD_LAPACKE.h
@@ -61,9 +61,10 @@ JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPiv
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
- ldvt = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
+ lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
- localV.resize(ldvt, m_cols); \
+ localV.resize(vt_rows, m_cols); \
+ ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
vt = (LAPACKE_TYPE*)localV.data(); \
} else { ldvt=1; vt=&dummy; }\
Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
diff --git a/examples/ThirdPartyLibs/Eigen/src/SVD/SVDBase.h b/examples/ThirdPartyLibs/Eigen/src/SVD/SVDBase.h
index 429414797..bc7ab88b4 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SVD/SVDBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SVD/SVDBase.h
@@ -17,6 +17,18 @@
#define EIGEN_SVDBASE_H
namespace Eigen {
+
+namespace internal {
+template<typename Derived> struct traits<SVDBase<Derived> >
+ : traits<Derived>
+{
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+};
+}
+
/** \ingroup SVD_Module
*
*
@@ -39,20 +51,26 @@ namespace Eigen {
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * The status of the computation can be retrieved using the \a info() method. Unless \a info() returns \a Success, the results should not be
+ * considered well defined.
*
- * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, and \a info() will return \a InvalidInput, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
* \sa class BDCSVD, class JacobiSVD
*/
-template<typename Derived>
-class SVDBase
+template<typename Derived> class SVDBase
+ : public SolverBase<SVDBase<Derived> >
{
+public:
+
+ template<typename Derived_>
+ friend struct internal::solve_assertion;
-public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::StorageIndex StorageIndex;
+ typedef typename Eigen::internal::traits<SVDBase>::StorageIndex StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
@@ -82,7 +100,7 @@ public:
*/
const MatrixUType& matrixU() const
{
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
return m_matrixU;
}
@@ -98,7 +116,7 @@ public:
*/
const MatrixVType& matrixV() const
{
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
return m_matrixV;
}
@@ -110,14 +128,14 @@ public:
*/
const SingularValuesType& singularValues() const
{
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
return m_singularValues;
}
/** \returns the number of singular values that are not exactly 0 */
Index nonzeroSingularValues() const
{
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
return m_nonzeroSingularValues;
}
@@ -130,7 +148,7 @@ public:
inline Index rank() const
{
using std::abs;
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ _check_compute_assertions();
if(m_singularValues.size()==0) return 0;
RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
Index i = m_nonzeroSingularValues-1;
@@ -180,8 +198,10 @@ public:
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ // this temporary is needed to work around an MSVC issue
+ Index diagSize = (std::max<Index>)(1,m_diagSize);
return m_usePrescribedThreshold ? m_prescribedThreshold
- : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+ : RealScalar(diagSize)*NumTraits<Scalar>::epsilon();
}
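// The default rank threshold is max(1, diagSize) * epsilon, applied relative
// to the largest singular value inside rank(). A usage sketch, assuming the
// standard <Eigen/Dense> entry point:
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(3, 3);
  A << 1, 2, 3,
       2, 4, 6,   // second row is twice the first: A is rank deficient
       0, 0, 1;
  Eigen::BDCSVD<Eigen::MatrixXd> svd(A);
  std::cout << "rank, default threshold: " << svd.rank() << "\n";  // 2
  svd.setThreshold(1e-3);  // prescribe a looser threshold instead
  std::cout << "rank, prescribed threshold: " << svd.rank() << "\n";
  return 0;
}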
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
@@ -192,6 +212,7 @@ public:
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
@@ -203,31 +224,55 @@ public:
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
- solve(const MatrixBase<Rhs>& b) const
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
+
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful.
+ */
+ EIGEN_DEVICE_FUNC
+ ComputationInfo info() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
- eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return Solve<Derived, Rhs>(derived(), b.derived());
+ return m_info;
}
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
-
+
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
-
+
+ void _check_compute_assertions() const {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ }
+
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ _check_compute_assertions();
+ eigen_assert(computeU() && computeV() && "SVDBase::solve(): Both unitaries U and V are required to be computed (thin unitaries suffice).");
+ eigen_assert((Transpose_?cols():rows())==b.rows() && "SVDBase::solve(): invalid number of rows of the right hand side matrix b");
+ }
+
// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
+ ComputationInfo m_info;
bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
@@ -240,9 +285,14 @@ protected:
* Default constructor of SVDBase
*/
SVDBase()
- : m_isInitialized(false),
+ : m_info(Success),
+ m_isInitialized(false),
m_isAllocated(false),
m_usePrescribedThreshold(false),
+ m_computeFullU(false),
+ m_computeThinU(false),
+ m_computeFullV(false),
+ m_computeThinV(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1), m_diagSize(0)
{
@@ -257,17 +307,30 @@ template<typename Derived>
template<typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
- eigen_assert(rhs.rows() == rows());
-
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
- Matrix<Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
+ Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
Index l_rank = rank();
tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
dst = m_matrixV.leftCols(l_rank) * tmp;
}
+
+template<typename Derived>
+template<bool Conjugate, typename RhsType, typename DstType>
+void SVDBase<Derived>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+{
+ // A = U S V^*
+ // So A^{-*} = U S^{-1} V^*
+ // And A^{-T} = U_conj S^{-1} V^T
+ Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
+ Index l_rank = rank();
+
+ tmp.noalias() = m_matrixV.leftCols(l_rank).transpose().template conjugateIf<Conjugate>() * rhs;
+ tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
+ dst = m_matrixU.template conjugateIf<!Conjugate>().leftCols(l_rank) * tmp;
+}
#endif
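// _solve_impl applies x = V * S^{-1} * U^adjoint * b, and the new
// _solve_impl_transposed supports solving with the transposed or adjoint
// decomposition. A least-squares usage sketch:
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);  // overdetermined system
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);
  // thin U and V suffice for solving
  Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd x = svd.solve(b);  // least-squares solution
  std::cout << "residual norm: " << (A * x - b).norm() << "\n";
  return 0;
}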
template<typename MatrixType>
@@ -285,6 +348,7 @@ bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computat
m_rows = rows;
m_cols = cols;
+ m_info = Success;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
diff --git a/examples/ThirdPartyLibs/Eigen/src/SVD/UpperBidiagonalization.h b/examples/ThirdPartyLibs/Eigen/src/SVD/UpperBidiagonalization.h
index 11ac847e1..997defc47 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SVD/UpperBidiagonalization.h
@@ -127,7 +127,7 @@ void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
// apply householder transform to remaining part of mat on the left
mat.bottomRightCorner(remainingRows-1, remainingCols)
- .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
+ .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData);
}
}
@@ -202,7 +202,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
- // let's use the begining of column k of Y as a temporary vector
+ // let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
@@ -231,7 +231,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
- // let's use the begining of column k of X as a temporary vectors
+ // let's use the beginning of column k of X as temporary vectors
// note that tmp0 and tmp1 overlap
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky.h b/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky.h
index 2907f6529..9f93e3255 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -80,11 +80,19 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
/** Default constructor */
SimplicialCholeskyBase()
- : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+ : m_info(Success),
+ m_factorizationIsOk(false),
+ m_analysisIsOk(false),
+ m_shiftOffset(0),
+ m_shiftScale(1)
{}
explicit SimplicialCholeskyBase(const MatrixType& matrix)
- : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+ : m_info(Success),
+ m_factorizationIsOk(false),
+ m_analysisIsOk(false),
+ m_shiftOffset(0),
+ m_shiftScale(1)
{
derived().compute(matrix);
}
@@ -101,7 +109,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
@@ -210,7 +218,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
CholMatrixType tmp(size,size);
ConstCholMatrixPtr pmat;
- if(m_P.size()==0 && (UpLo&Upper)==Upper)
+ if(m_P.size() == 0 && (int(UpLo) & int(Upper)) == Upper)
{
// If there is no ordering, try to directly use the input matrix without any copy
internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, tmp);
@@ -279,8 +287,8 @@ template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<Simp
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
typedef TriangularView<const CholMatrixType, Eigen::Lower> MatrixL;
typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
- static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
- static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
+ static inline MatrixL getL(const CholMatrixType& m) { return MatrixL(m); }
+ static inline MatrixU getU(const CholMatrixType& m) { return MatrixU(m.adjoint()); }
};
template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
@@ -293,8 +301,8 @@ template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<Simpl
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
typedef TriangularView<const CholMatrixType, Eigen::UnitLower> MatrixL;
typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
- static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
- static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
+ static inline MatrixL getL(const CholMatrixType& m) { return MatrixL(m); }
+ static inline MatrixU getU(const CholMatrixType& m) { return MatrixU(m.adjoint()); }
};
template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
@@ -608,7 +616,7 @@ public:
}
if(Base::m_diag.size()>0)
- dest = Base::m_diag.asDiagonal().inverse() * dest;
+ dest = Base::m_diag.real().asDiagonal().inverse() * dest;
if (Base::m_matrix.nonZeros()>0) // otherwise I==I
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
index 31e06995b..72e1740c1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -2,46 +2,21 @@
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
-
-NOTE: thes functions vave been adapted from the LDL library:
+NOTE: these functions have been adapted from the LDL library:
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
-LDL License:
-
- Your use or distribution of LDL or any modified version of
- LDL implies that you agree to this License.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
- USA
-
- Permission is hereby granted to use or copy this program under the
- terms of the GNU LGPL, provided that the Copyright, this License,
- and the Availability of the original version is retained on all copies.
- User documentation of any code that uses this code or any modified
- version of this code must cite the Copyright, this License, the
- Availability note, and "Used by permission." Permission to modify
- the code and to distribute modified code is granted, provided the
- Copyright, this License, and the Availability note are retained,
- and a notice that the code was modified is included.
+The author of LDL, Timothy A. Davis., has executed a license with Google LLC
+to permit distribution of this code and derivative works as part of Eigen under
+the Mozilla Public License v. 2.0, as stated at the top of this file.
*/
-#include "../Core/util/NonMPL2.h"
-
#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
@@ -122,7 +97,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
for(StorageIndex k = 0; k < size; ++k)
{
// compute nonzero pattern of kth row of L, in topological order
- y[k] = 0.0; // Y(0:k) is now all zero
+ y[k] = Scalar(0); // Y(0:k) is now all zero
StorageIndex top = size; // stack for pattern is empty
tags[k] = k; // mark node k as visited
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
@@ -146,17 +121,17 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
/* compute numerical values kth row of L (a sparse triangular solve) */
RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
- y[k] = 0.0;
+ y[k] = Scalar(0);
for(; top < size; ++top)
{
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
Scalar yi = y[i]; /* get and clear Y(i) */
- y[i] = 0.0;
+ y[i] = Scalar(0);
/* the nonzero entry L(k,i) */
Scalar l_ki;
if(DoLDLT)
- l_ki = yi / m_diag[i];
+ l_ki = yi / numext::real(m_diag[i]);
else
yi = l_ki = yi / Lx[Lp[i]];
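// For an LDLT factorization of a Hermitian matrix the diagonal D is real even
// when Scalar is complex, so dividing by numext::real(m_diag[i]) fixes the
// complex case and replaces a complex division with a cheaper real one.
// A minimal sketch of the idea:
#include <complex>

std::complex<double> divideByRealDiag(std::complex<double> yi,
                                      std::complex<double> di)
{
  // D(i,i) of a Hermitian LDLT is real up to rounding; use its real part only
  return yi / di.real();
}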
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/AmbiVector.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/AmbiVector.h
index e0295f2af..2cb7747cc 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/AmbiVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/AmbiVector.h
@@ -28,7 +28,7 @@ class AmbiVector
typedef typename NumTraits<Scalar>::Real RealScalar;
explicit AmbiVector(Index size)
- : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
}
@@ -147,7 +147,8 @@ template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
m_mode = mode;
- if (m_mode==IsSparse)
+ // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
+ // if (m_mode==IsSparse)
{
m_llSize = 0;
m_llStart = -1;
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/CompressedStorage.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/CompressedStorage.h
index d89fa0dae..acd986fab 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/CompressedStorage.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/CompressedStorage.h
@@ -207,6 +207,22 @@ class CompressedStorage
return m_values[id];
}
+ void moveChunk(Index from, Index to, Index chunkSize)
+ {
+ eigen_internal_assert(to+chunkSize <= m_size);
+ if(to>from && from+chunkSize>to)
+ {
+ // move backward
+ internal::smart_memmove(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_memmove(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ else
+ {
+ internal::smart_copy(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ }
+
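// moveChunk dispatches between an overlap-safe backward move and a plain
// forward copy. A sketch of the same dispatch on a raw buffer, assuming
// trivially copyable elements:
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>

void moveChunkSketch(double* buf, std::size_t size,
                     std::size_t from, std::size_t to, std::size_t chunk)
{
  assert(to + chunk <= size);
  if (to > from && from + chunk > to)
    // destination overlaps the tail of the source: memmove copies safely
    std::memmove(buf + to, buf + from, chunk * sizeof(double));
  else
    // disjoint ranges, or destination strictly precedes the source:
    // a forward element-wise copy is safe
    std::copy(buf + from, buf + from + chunk, buf + to);
}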
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
Index k = 0;
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 9db119b67..948650253 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -25,16 +25,16 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
-
+
ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
-
+
std::memset(mask,0,sizeof(bool)*rows);
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
-
+
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
// of the lhs differs in average of one non zeros, thus the number of non zeros for
@@ -141,7 +141,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
-
+
// If the result is tall and thin (in the extreme case a column vector)
// then it is faster to sort the coefficients inplace instead of transposing twice.
// FIXME, the following heuristic is probably not very good.
@@ -155,7 +155,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
else
{
ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
- // ressort to transpose to sort the entries
+ // resort to transpose to sort the entries
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
RowMajorMatrix resRow(resCol);
res = resRow.markAsRValue();
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseAssign.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseAssign.h
index 113463258..905485c88 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseAssign.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseAssign.h
@@ -134,8 +134,8 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
};
// Generic Sparse to Dense assignment
-template< typename DstXprType, typename SrcXprType, typename Functor>
-struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
+template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Weak>
{
static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
@@ -153,6 +153,73 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
}
};
+// Specialization for dense ?= dense +/- sparse and dense ?= sparse +/- dense
+template<typename DstXprType, typename Func1, typename Func2>
+struct assignment_from_dense_op_sparse
+{
+ template<typename SrcXprType, typename InitialFunc>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
+ #endif
+
+ call_assignment_no_alias(dst, src.lhs(), Func1());
+ call_assignment_no_alias(dst, src.rhs(), Func2());
+ }
+
+ // Specialization for dense1 = sparse + dense2; -> dense1 = dense2; dense1 += sparse;
+ template<typename Lhs, typename Rhs, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+ run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
+ const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
+ #endif
+
+ // Apply the dense matrix first, then the sparse one.
+ call_assignment_no_alias(dst, src.rhs(), Func1());
+ call_assignment_no_alias(dst, src.lhs(), Func2());
+ }
+
+ // Specialization for dense1 = sparse - dense2; -> dense1 = -dense2; dense1 += sparse;
+ template<typename Lhs, typename Rhs, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+ run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_difference_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
+ const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
+ #endif
+
+ // Apply the dense matrix first, then the sparse one.
+ call_assignment_no_alias(dst, -src.rhs(), Func1());
+ call_assignment_no_alias(dst, src.lhs(), add_assign_op<typename DstXprType::Scalar,typename Lhs::Scalar>());
+ }
+};
+
+#define EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(ASSIGN_OP,BINOP,ASSIGN_OP2) \
+ template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> \
+ struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<Scalar,Scalar>, const Lhs, const Rhs>, internal::ASSIGN_OP<typename DstXprType::Scalar,Scalar>, \
+ Sparse2Dense, \
+ typename internal::enable_if< internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
+ || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type> \
+ : assignment_from_dense_op_sparse<DstXprType, internal::ASSIGN_OP<typename DstXprType::Scalar,typename Lhs::Scalar>, internal::ASSIGN_OP2<typename DstXprType::Scalar,typename Rhs::Scalar> > \
+ {}
+
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op, scalar_sum_op,add_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_sum_op,add_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_sum_op,sub_assign_op);
+
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op, scalar_difference_op,sub_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_difference_op,sub_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_difference_op,add_assign_op);
+
+
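// These specializations rewrite "dense = dense + sparse" as
// "dense = dense; dense += sparse", so the sparse operand is traversed
// through its nonzeros instead of being densified. Usage stays transparent:
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::MatrixXd D = Eigen::MatrixXd::Ones(3, 3);
  Eigen::SparseMatrix<double> S(3, 3);
  S.insert(0, 0) = 5.0;
  S.insert(2, 1) = -1.0;
  Eigen::MatrixXd R = D + S;  // dispatched through the Sparse2Dense path above
  std::cout << R << "\n";
  return 0;
}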
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
@@ -179,35 +246,22 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef typename DstXprType::Scalar Scalar;
- typedef Array<StorageIndex,Dynamic,1> ArrayXI;
- typedef Array<Scalar,Dynamic,1> ArrayXS;
- template<int Options>
- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- Index dstRows = src.rows();
- Index dstCols = src.cols();
- if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
- dst.resize(dstRows, dstCols);
- Index size = src.diagonal().size();
- dst.makeCompressed();
- dst.resizeNonZeros(size);
- Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
- Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
- Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
- }
+ template<int Options, typename AssignFunc>
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
+ { dst.assignDiagonal(src.diagonal(), func); }
template<typename DstDerived>
static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- dst.diagonal() = src.diagonal();
- }
+ { dst.derived().diagonal() = src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() += src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() += src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() -= src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() -= src.diagonal(); }
};
} // end namespace internal
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseBlock.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseBlock.h
index 511e92b2f..5b4f6cc9f 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseBlock.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseBlock.h
@@ -164,7 +164,7 @@ public:
}
else
{
- if(m_matrix.isCompressed())
+ if(m_matrix.isCompressed() && nnz!=block_size)
{
// no need to realloc, simply copy the tail at its respective position and insert tmp
matrix.data().resize(start + nnz + tail_size);
@@ -326,46 +326,6 @@ private:
//----------
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return InnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return ConstInnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{
- return Block<Derived,Dynamic,Dynamic,true>(derived(),
- IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
- IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{
- return Block<const Derived,Dynamic,Dynamic,true>(derived(),
- IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
- IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
/** Generic implementation of sparse Block expression.
* Read-only.
*/
@@ -486,9 +446,13 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBa
{}
inline Index nonZerosEstimate() const {
- Index nnz = m_block.nonZeros();
- if(nnz<0)
- return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+ const Index nnz = m_block.nonZeros();
+ if(nnz < 0) {
+ // Scale the non-zero estimate for the underlying expression linearly with block size.
+ // Return zero if the underlying block is empty.
+ const Index nested_sz = m_block.nestedExpression().size();
+ return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
+ }
return nnz;
}
@@ -503,22 +467,25 @@ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
: public EvalIterator
{
- enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
+ // because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
+ // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
+ enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
const XprType& m_block;
Index m_end;
public:
EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
- : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
+ : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
m_block(aEval.m_block),
- m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+ m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
{
- while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
+ while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
EvalIterator::operator++();
}
- inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
- inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
+ inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
+ inline Index outer() const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
@@ -528,7 +495,8 @@ public:
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
- enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ // NOTE see above
+ enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
const unary_evaluator& m_eval;
Index m_outerPos;
const Index m_innerIndex;
@@ -538,9 +506,9 @@ public:
EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
: m_eval(aEval),
- m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
- m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
- m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
+ m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
+ m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
+ m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
m_it(m_eval.m_argImpl, m_outerPos)
{
EIGEN_UNUSED_VARIABLE(outer);
@@ -551,10 +519,10 @@ public:
++(*this);
}
- inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
+ inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
inline Index outer() const { return 0; }
- inline Index row() const { return IsRowMajor ? 0 : index(); }
- inline Index col() const { return IsRowMajor ? index() : 0; }
+ inline Index row() const { return XprIsRowMajor ? 0 : index(); }
+ inline Index col() const { return XprIsRowMajor ? index() : 0; }
inline Scalar value() const { return m_it.value(); }
inline Scalar& valueRef() { return m_it.valueRef(); }
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCompressedBase.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCompressedBase.h
index e0b3c22b6..6a2c7a8ce 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCompressedBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -128,6 +128,28 @@ class SparseCompressedBase
protected:
/** Default constructor. Do nothing. */
SparseCompressedBase() {}
+
+ /** \internal return the index of the coeff at (row,col) or just before if it does not exist.
+ * This is an analogue of std::lower_bound.
+ */
+ internal::LowerBoundIndex lower_bound(Index row, Index col) const
+ {
+ eigen_internal_assert(row>=0 && row<this->rows() && col>=0 && col<this->cols());
+
+ const Index outer = Derived::IsRowMajor ? row : col;
+ const Index inner = Derived::IsRowMajor ? col : row;
+
+ Index start = this->outerIndexPtr()[outer];
+ Index end = this->isCompressed() ? this->outerIndexPtr()[outer+1] : this->outerIndexPtr()[outer] + this->innerNonZeroPtr()[outer];
+ eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
+ internal::LowerBoundIndex p;
+ p.value = std::lower_bound(this->innerIndexPtr()+start, this->innerIndexPtr()+end,inner) - this->innerIndexPtr();
+ p.found = (p.value<end) && (this->innerIndexPtr()[p.value]==inner);
+ return p;
+ }
+
+ friend struct internal::evaluator<SparseCompressedBase<Derived> >;
+
private:
template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};
@@ -333,17 +355,8 @@ protected:
Index find(Index row, Index col) const
{
- eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
-
- const Index outer = Derived::IsRowMajor ? row : col;
- const Index inner = Derived::IsRowMajor ? col : row;
-
- Index start = m_matrix->outerIndexPtr()[outer];
- Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
- eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
- const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
-
- return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
+ internal::LowerBoundIndex p = m_matrix->lower_bound(row,col);
+ return p.found ? p.value : Dynamic;
}
const Derived *m_matrix;
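
For reference, a minimal standalone sketch of the search the new protected lower_bound(row,col) helper performs: a std::lower_bound over one outer vector's inner indices, returning both the position and whether the coefficient actually exists. The lowerBound free function and the sample data below are illustrative only, not part of the patch.

#include <algorithm>
#include <cassert>

struct LowerBoundIndexSketch { long value; bool found; };

// Locate `inner` within innerIdx[start,end), or the position just before
// where it would be inserted (absolute offset into the buffer).
LowerBoundIndexSketch lowerBound(const int* innerIdx, long start, long end, int inner)
{
    LowerBoundIndexSketch r;
    r.value = std::lower_bound(innerIdx + start, innerIdx + end, inner) - innerIdx;
    r.found = (r.value < end) && (innerIdx[r.value] == inner);
    return r;
}

int main()
{
    const int inner[] = {1, 4, 7};                   // nonzeros at inner indices 1, 4, 7
    assert( lowerBound(inner, 0, 3, 4).found);       // existing coefficient
    assert(!lowerBound(inner, 0, 3, 5).found);       // missing coefficient
    assert( lowerBound(inner, 0, 3, 5).value == 2);  // insertion point, just before index 7
    return 0;
}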
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index c41c07af1..9b0d3f98d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -101,7 +101,7 @@ public:
}
else
{
- m_value = 0; // this is to avoid a compilation warning
+ m_value = Scalar(0); // this is to avoid a compilation warning
m_id = -1;
}
return *this;
@@ -126,7 +126,7 @@ public:
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -211,7 +211,7 @@ public:
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -298,7 +298,7 @@ public:
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -457,7 +457,7 @@ public:
enum {
- CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -530,7 +530,7 @@ public:
enum {
- CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -604,7 +604,7 @@ public:
enum {
- CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
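
The recurring int(...) casts above address a portability wrinkle: each cost constant is an unscoped enum whose underlying type is implementation-defined, so summing the enumerators directly can trigger warnings or wrap in a narrow or unsigned type. A minimal illustration of the pattern (all names below are placeholders):

struct LhsEval       { enum { CoeffReadCost = 3 }; };
struct RhsEval       { enum { CoeffReadCost = 5 }; };
struct FunctorTraits { enum { Cost          = 2 }; };

enum {
    // Casting each operand to int forces the addition to happen in plain int,
    // independently of each enum's deduced underlying type.
    CoeffReadCost = int(LhsEval::CoeffReadCost)
                  + int(RhsEval::CoeffReadCost)
                  + int(FunctorTraits::Cost)
};

static_assert(CoeffReadCost == 10, "sum evaluated as int");

int main() { return 0; }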
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index ea7973790..32dac0f78 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -24,7 +24,7 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
class InnerIterator;
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
Flags = XprType::Flags
};
@@ -49,6 +49,7 @@ template<typename UnaryOp, typename ArgType>
class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
+ protected:
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
@@ -78,7 +79,7 @@ struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
class InnerIterator;
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<ViewOp>::Cost),
Flags = XprType::Flags
};
@@ -99,6 +100,7 @@ template<typename ViewOp, typename ArgType>
class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
+ protected:
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseDenseProduct.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseDenseProduct.h
index 0547db596..f005a18a1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -88,10 +88,11 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
- evaluator<Lhs> lhsEval(lhs);
+ LhsEval lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
@@ -111,17 +112,38 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
- evaluator<Lhs> lhsEval(lhs);
- for(Index j=0; j<lhs.outerSize(); ++j)
+ Index n = lhs.rows();
+ LhsEval lhsEval(lhs);
+
+#ifdef EIGEN_HAS_OPENMP
+ Eigen::initParallel();
+ Index threads = Eigen::nbThreads();
+ // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
+ // It roughly represents the minimal amount of work needed for the parallelization to pay off.
+ if(threads>1 && lhsEval.nonZerosEstimate()*rhs.cols() > 20000)
{
- typename Res::RowXpr res_j(res.row(j));
- for(LhsInnerIterator it(lhsEval,j); it ;++it)
- res_j += (alpha*it.value()) * rhs.row(it.index());
+ #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval,rhs,res,alpha,i);
+ }
+ else
+#endif
+ {
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval, rhs, res, alpha, i);
}
}
+
+ static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, Res& res, const typename Res::Scalar& alpha, Index i)
+ {
+ typename Res::RowXpr res_i(res.row(i));
+ for(LhsInnerIterator it(lhsEval,i); it ;++it)
+ res_i += (alpha*it.value()) * rhs.row(it.index());
+ }
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
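
Usage-wise the change is transparent; below is a sketch of the code path that now runs in parallel when Eigen is built with OpenMP and the estimated work (nonzero estimate times right-hand-side columns) exceeds the 20000 threshold. Sizes and contents are illustrative only.

#include <Eigen/Dense>
#include <Eigen/Sparse>

int main()
{
    Eigen::SparseMatrix<double, Eigen::RowMajor> A(1000, 1000);
    A.setIdentity();                          // any sparse content works
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(1000, 50);

    Eigen::setNbThreads(4);                   // optional: cap the OpenMP pool
    Eigen::MatrixXd C = A * B;                // row-major sparse * dense kernel
    return C.rows() == 1000 ? 0 : 1;
}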
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrix.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrix.h
index 323c2323b..616b4a0c2 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrix.h
@@ -21,7 +21,7 @@ namespace Eigen {
* This class implements a more versatile variant of the common \em compressed row/column storage format.
* Each column's (resp. row's) non zeros are stored as pairs of value and associated row (resp. column) index.
* All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
- * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+ * space in between the nonzeros of two successive columns (resp. rows) such that insertion of a new non-zero
* can be done with limited memory reallocation and copies.
*
* A call to the function makeCompressed() turns the matrix into the standard \em compressed format
@@ -99,6 +99,8 @@ class SparseMatrix
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::convert_index;
friend class SparseVector<_Scalar,0,_StorageIndex>;
+ template<typename, typename, typename, typename, typename>
+ friend struct internal::Assignment;
public:
using Base::isCompressed;
using Base::nonZeros;
@@ -327,7 +329,8 @@ class SparseMatrix
m_outerIndex[j] = newOuterIndex[j];
m_innerNonZeros[j] = innerNNZ;
}
- m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+ if(m_outerSize>0)
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
m_data.resize(m_outerIndex[m_outerSize]);
}
@@ -502,8 +505,8 @@ class SparseMatrix
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
}
-
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
+
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
@@ -576,10 +579,12 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
- for(Index i = 0; i < m_outerSize; i++)
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
+ m_innerNonZeros[i] = 0;
}
// Change the m_innerNonZeros in case of a decrease of inner size
@@ -604,9 +609,9 @@ class SparseMatrix
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
- StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
- m_outerIndex[i] = last;
+ m_outerIndex[i] = lastIdx;
}
m_outerSize += outerChange;
}
@@ -780,6 +785,9 @@ class SparseMatrix
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
+
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
@@ -893,7 +901,114 @@ public:
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
+ }
+protected:
+ struct IndexPosPair {
+ IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+ Index i;
+ Index p;
+ };
+
+ /** \internal assign \a diagXpr to the diagonal of \c *this
+ * There are different strategies:
+ * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
+ * 2 - otherwise, for each diagonal coeff,
+ * 2.a - if it already exists, then we update it,
+ * 2.b - otherwise, if *this is uncompressed and the current inner-vector has spare room for at least 1 element, then we perform an in-place insertion.
+ * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
+ * 3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk to the right position, and insert the new elements.
+ *
+ * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+ * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+ * then it *might* be better to disable case 2.b since they will have to be copied anyway.
+ */
+ template<typename DiagXpr, typename Func>
+ void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+ {
+ Index n = diagXpr.size();
+
+ const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+ if(overwrite)
+ {
+ if((this->rows()!=n) || (this->cols()!=n))
+ this->resize(n, n);
+ }
+
+ if(m_data.size()==0 || overwrite)
+ {
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ this->makeCompressed();
+ this->resizeNonZeros(n);
+ Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+ Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+ Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+ values.setZero();
+ internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+ }
+ else
+ {
+ bool isComp = isCompressed();
+ internal::evaluator<DiagXpr> diaEval(diagXpr);
+ std::vector<IndexPosPair> newEntries;
+
+ // 1 - try in-place update and record insertion failures
+ for(Index i = 0; i<n; ++i)
+ {
+ internal::LowerBoundIndex lb = this->lower_bound(i,i);
+ Index p = lb.value;
+ if(lb.found)
+ {
+ // the coeff already exists
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+ {
+ // non compressed mode with local room for inserting one element
+ m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+ m_innerNonZeros[i]++;
+ m_data.value(p) = Scalar(0);
+ m_data.index(p) = StorageIndex(i);
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else
+ {
+ // defer insertion
+ newEntries.push_back(IndexPosPair(i,p));
+ }
+ }
+ // 2 - insert deferred entries
+ Index n_entries = Index(newEntries.size());
+ if(n_entries>0)
+ {
+ Storage newData(m_data.size()+n_entries);
+ Index prev_p = 0;
+ Index prev_i = 0;
+ for(Index k=0; k<n_entries;++k)
+ {
+ Index i = newEntries[k].i;
+ Index p = newEntries[k].p;
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+ for(Index j=prev_i;j<i;++j)
+ m_outerIndex[j+1] += k;
+ if(!isComp)
+ m_innerNonZeros[i]++;
+ prev_p = p;
+ prev_i = i;
+ newData.value(p+k) = Scalar(0);
+ newData.index(p+k) = StorageIndex(i);
+ assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
+ }
+ {
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+ for(Index j=prev_i+1;j<=m_outerSize;++j)
+ m_outerIndex[j] += n_entries;
+ }
+ m_data.swap(newData);
+ }
+ }
}
private:
@@ -973,7 +1088,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
* \code
typedef Triplet<double> T;
std::vector<T> tripletList;
- triplets.reserve(estimation_of_entries);
+ tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
@@ -986,7 +1101,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
- * be explicitely stored into a std::vector for instance.
+ * be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
@@ -1232,7 +1347,7 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
}
m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
if(m_data.size() != m_data.allocatedSize())
@@ -1274,7 +1389,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
m_innerNonZeros[outer]++;
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
template<typename _Scalar, int _Options, typename _StorageIndex>
@@ -1381,7 +1496,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
}
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
namespace internal {
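
At the API level, the new assignDiagonal machinery is what backs writing to the diagonal of a SparseMatrix, both overwriting it and updating it in place. A small sketch, assuming the in-place += path this patch exposes (values illustrative):

#include <Eigen/Dense>
#include <Eigen/Sparse>

int main()
{
    Eigen::SparseMatrix<double> A(4, 4);
    A.insert(0, 0) = 1.0;                         // only part of the diagonal exists
    A.insert(2, 1) = 5.0;
    A.makeCompressed();

    Eigen::VectorXd d(4);
    d << 10, 20, 30, 40;
    A.diagonal() = d;                             // strategy 1: dense-like overwrite
    A.diagonal() += Eigen::VectorXd::Ones(4);     // strategy 2: per-coeff in-place update
    return 0;
}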
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrixBase.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrixBase.h
index c6b548f11..229449f02 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -87,6 +87,11 @@ template<typename Derived> class SparseMatrixBase
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
+ /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
+ * and 2 for matrices.
+ */
+
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
@@ -350,18 +355,6 @@ template<typename Derived> class SparseMatrixBase
const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
- // inner-vector
- typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
- typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
- InnerVectorReturnType innerVector(Index outer);
- const ConstInnerVectorReturnType innerVector(Index outer) const;
-
- // set of inner-vectors
- typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
- typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
- InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
- const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
-
DenseMatrixType toDense() const
{
return DenseMatrixType(derived());
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseProduct.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseProduct.h
index 4cbf68781..af8a7744d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseProduct.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseProduct.h
@@ -17,7 +17,7 @@ namespace Eigen {
* The automatic pruning of the small values can be achieved by calling the pruned() function
* in which case a totally different product algorithm is employed:
* \code
- * C = (A*B).pruned(); // supress numerical zeros (exact)
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
@@ -164,6 +164,18 @@ protected:
} // end namespace internal
+// sparse matrix = sparse-product (can be sparse*sparse, sparse*perm, etc.)
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename Lhs, typename Rhs>
+SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const Product<Lhs,Rhs,AliasFreeProduct>& src)
+{
+ // std::cout << "in Assignment : " << DstOptions << "\n";
+ SparseMatrix dst(src.rows(),src.cols());
+ internal::generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
+ this->swap(dst);
+ return *this;
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSEPRODUCT_H
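
A sketch of the assignment the new overload intercepts: the product is evaluated into a temporary SparseMatrix of the destination's type, which is then swapped into *this, saving one copy. Sizes are illustrative.

#include <Eigen/Sparse>

int main()
{
    Eigen::SparseMatrix<double> A(100, 100), B(100, 100), C;
    A.setIdentity();
    B.setIdentity();

    C = A * B;                 // dispatched to SparseMatrix::operator=(Product<...>)
    C = (A * B).pruned();      // variant that also suppresses exact numerical zeros
    return 0;
}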
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseRef.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseRef.h
index d91f38f97..748f87d62 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseRef.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseRef.h
@@ -201,7 +201,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
~Ref() {
if(m_hasCopy) {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
obj->~TPlainObjectType();
}
}
@@ -213,7 +213,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
{
if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
{
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
@@ -227,14 +227,14 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
- char m_object_bytes[sizeof(TPlainObjectType)];
+ typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
bool m_hasCopy;
};
@@ -319,7 +319,7 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
~Ref() {
if(m_hasCopy) {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
obj->~TPlainObjectType();
}
}
@@ -335,14 +335,14 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
- char m_object_bytes[sizeof(TPlainObjectType)];
+ typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
bool m_hasCopy;
};
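
The switch from a raw char buffer to internal::aligned_storage matters because placement-new of an over-aligned type into a plain char array is not guaranteed to be correctly aligned. A sketch of the same pattern using the standard-library equivalent (Payload and Holder are illustrative names):

#include <new>
#include <type_traits>

struct alignas(16) Payload { float v[4]; };

struct Holder
{
    std::aligned_storage<sizeof(Payload), alignof(Payload)>::type m_storage;
    bool m_hasCopy;

    Holder() : m_hasCopy(false) {}
    void construct()
    {
        ::new (static_cast<void*>(&m_storage)) Payload(); // storage is suitably aligned
        m_hasCopy = true;
    }
    ~Holder()
    {
        if (m_hasCopy)
            reinterpret_cast<Payload*>(&m_storage)->~Payload();
    }
};

int main() { Holder h; h.construct(); return 0; }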
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseSelfAdjointView.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 5ab64f1a8..85b00e10e 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -142,6 +142,9 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
return *this = src.twistedBy(pnull);
}
+ // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)
+
template<typename SrcMatrixType,unsigned int SrcMode>
SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
@@ -311,7 +314,7 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
while (i && i.index()<j) ++i;
if(i && i.index()==j)
{
- res(j,k) += alpha * i.value() * rhs(j,k);
+ res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
++i;
}
}
@@ -324,14 +327,14 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
{
LhsScalar lhs_ij = i.value();
if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
- res_j += lhs_ij * rhs(i.index(),k);
+ res_j += lhs_ij * rhs.coeff(i.index(),k);
res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
}
- res(j,k) += alpha * res_j;
+ res.coeffRef(j,k) += alpha * res_j;
// handle diagonal coeff
if (ProcessFirstHalf && i && (i.index()==j))
- res(j,k) += alpha * i.value() * rhs(j,k);
+ res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
}
}
}
@@ -453,7 +456,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
- if(Mode==(Upper|Lower))
+ if(Mode==int(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
@@ -486,7 +489,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm ? perm[i] : i;
- if(Mode==(Upper|Lower))
+ if(Mode==int(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
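
The EIGEN_DEFAULT_COPY_CONSTRUCTOR line above reflects a core-language rule: once a class user-declares a copy-assignment operator, relying on the implicitly generated copy constructor is deprecated (compilers warn with -Wdeprecated-copy). A minimal illustration, assuming the macro expands to a defaulted copy constructor under C++11:

struct View
{
    View() {}
    View(const View&) = default;                    // re-declared explicitly
    View& operator=(const View&) { return *this; }  // user-declared copy-assignment
};

int main()
{
    View a;
    View b(a);   // fine, and warning-free thanks to the defaulted copy constructor
    b = a;
    return 0;
}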
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseUtil.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseUtil.h
index 74df0d496..ceb936887 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseUtil.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseUtil.h
@@ -140,6 +140,14 @@ struct SparseSelfAdjointShape { static std::string debugName() { return "SparseS
template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
+// return type of SparseCompressedBase::lower_bound;
+struct LowerBoundIndex {
+ LowerBoundIndex() : value(-1), found(false) {}
+ LowerBoundIndex(Index val, bool ok) : value(val), found(ok) {}
+ Index value;
+ bool found;
+};
+
} // end namespace internal
/** \ingroup SparseCore_Module
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseVector.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseVector.h
index 19b0fbc9d..05779be68 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseVector.h
@@ -281,7 +281,7 @@ class SparseVector
}
/** Swaps the values of \c *this and \a other.
- * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only.
+ * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseView.h b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseView.h
index 7c4aea743..92b3d1f7b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseView.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseCore/SparseView.h
@@ -90,6 +90,7 @@ struct unary_evaluator<SparseView<ArgType>, IteratorBased>
class InnerIterator : public EvalIterator
{
+ protected:
typedef typename XprType::Scalar Scalar;
public:
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU.h b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU.h
index f883ab383..0c8d8939b 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU.h
@@ -18,6 +18,63 @@ template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;
+template <bool Conjugate,class SparseLUType>
+class SparseLUTransposeView : public SparseSolverBase<SparseLUTransposeView<Conjugate,SparseLUType> >
+{
+protected:
+ typedef SparseSolverBase<SparseLUTransposeView<Conjugate,SparseLUType> > APIBase;
+ using APIBase::m_isInitialized;
+public:
+ typedef typename SparseLUType::Scalar Scalar;
+ typedef typename SparseLUType::StorageIndex StorageIndex;
+ typedef typename SparseLUType::MatrixType MatrixType;
+ typedef typename SparseLUType::OrderingType OrderingType;
+
+ enum {
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ SparseLUTransposeView() : m_sparseLU(NULL) {}
+ SparseLUTransposeView(const SparseLUTransposeView& view) {
+ this->m_sparseLU = view.m_sparseLU;
+ }
+ void setIsInitialized(const bool isInitialized) {this->m_isInitialized = isInitialized;}
+ void setSparseLU(SparseLUType* sparseLU) {m_sparseLU = sparseLU;}
+ using APIBase::_solve_impl;
+ template<typename Rhs, typename Dest>
+ bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
+ {
+ Dest& X(X_base.derived());
+ eigen_assert(m_sparseLU->info() == Success && "The matrix should be factorized first");
+ EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+ THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+
+
+ // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
+ for(Index j = 0; j < B.cols(); ++j){
+ X.col(j) = m_sparseLU->colsPermutation() * B.const_cast_derived().col(j);
+ }
+ //Forward substitution with transposed or adjoint of U
+ m_sparseLU->matrixU().template solveTransposedInPlace<Conjugate>(X);
+
+ //Backward substitution with transposed or adjoint of L
+ m_sparseLU->matrixL().template solveTransposedInPlace<Conjugate>(X);
+
+ // Permute back the solution
+ for (Index j = 0; j < B.cols(); ++j)
+ X.col(j) = m_sparseLU->rowsPermutation().transpose() * X.col(j);
+ return true;
+ }
+ inline Index rows() const { return m_sparseLU->rows(); }
+ inline Index cols() const { return m_sparseLU->cols(); }
+
+private:
+ SparseLUType *m_sparseLU;
+ SparseLUTransposeView& operator=(const SparseLUTransposeView&);
+};
+
+
/** \ingroup SparseLU_Module
* \class SparseLU
*
@@ -26,7 +83,7 @@ template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixURetu
* This class implements the supernodal LU factorization for general matrices.
* It uses the main techniques from the sequential SuperLU package
* (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real
- * and complex arithmetics with single and double precision, depending on the
+ * and complex arithmetic with single and double precision, depending on the
* scalar type of your input matrix.
* The code has been optimized to provide BLAS-3 operations during supernode-panel updates.
* It benefits directly from the built-in high-performance Eigen BLAS routines.
@@ -43,8 +100,8 @@ template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixURetu
* Simple example with key steps
* \code
* VectorXd x(n), b(n);
- * SparseMatrix<double, ColMajor> A;
- * SparseLU<SparseMatrix<scalar, ColMajor>, COLAMDOrdering<Index> > solver;
+ * SparseMatrix<double> A;
+ * SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > solver;
* // fill A and b;
* // Compute the ordering permutation vector from the structural pattern of A
* solver.analyzePattern(A);
@@ -97,6 +154,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
};
public:
+
SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
{
initperfvalues();
@@ -128,6 +186,45 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
//Factorize
factorize(matrix);
}
+
+ /** \returns an expression of the transposed of the factored matrix.
+ *
+ * A typical usage is to solve for the transposed problem A^T x = b:
+ * \code
+ * solver.compute(A);
+ * x = solver.transpose().solve(b);
+ * \endcode
+ *
+ * \sa adjoint(), solve()
+ */
+ const SparseLUTransposeView<false,SparseLU<_MatrixType,_OrderingType> > transpose()
+ {
+ SparseLUTransposeView<false, SparseLU<_MatrixType,_OrderingType> > transposeView;
+ transposeView.setSparseLU(this);
+ transposeView.setIsInitialized(this->m_isInitialized);
+ return transposeView;
+ }
+
+
+ /** \returns an expression of the adjoint of the factored matrix
+ *
+ * A typical usage is to solve for the adjoint problem A' x = b:
+ * \code
+ * solver.compute(A);
+ * x = solver.adjoint().solve(b);
+ * \endcode
+ *
+ * For real scalar types, this function is equivalent to transpose().
+ *
+ * \sa transpose(), solve()
+ */
+ const SparseLUTransposeView<true, SparseLU<_MatrixType,_OrderingType> > adjoint()
+ {
+ SparseLUTransposeView<true, SparseLU<_MatrixType,_OrderingType> > adjointView;
+ adjointView.setSparseLU(this);
+ adjointView.setIsInitialized(this->m_isInitialized);
+ return adjointView;
+ }
inline Index rows() const { return m_mat.rows(); }
inline Index cols() const { return m_mat.cols(); }
@@ -193,7 +290,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the LU factorization reports a problem, a zero diagonal entry for instance,
* \c InvalidInput if the input matrix is invalid
*
@@ -355,6 +452,9 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
return (m_detPermR * m_detPermC) > 0 ? det : -det;
}
+ Index nnzL() const { return m_nnzL; }
+ Index nnzU() const { return m_nnzU; }
+
protected:
// Functions
void initperfvalues()
@@ -391,7 +491,6 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
private:
// Disable copy constructor
SparseLU (const SparseLU& );
-
}; // End class SparseLU
@@ -499,11 +598,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");
- typedef typename IndexVector::Scalar StorageIndex;
-
m_isInitialized = true;
-
// Apply the column permutation computed in analyzepattern()
// m_mat = matrix * m_perm_c.inverse();
m_mat = matrix;
@@ -587,7 +683,6 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// (a) a relaxed supernode at the bottom of the etree, or
// (b) panel_size contiguous columns, <panel_size> defined by the user
Index jcol;
- IndexVector panel_histo(n);
Index pivrow; // Pivotal row number in the original row matrix
Index nseg1; // Number of segments in U-column above panel row jcol
Index nseg; // Number of segments in each U-column
@@ -706,13 +801,19 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator
typedef typename MappedSupernodalType::Scalar Scalar;
explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
{ }
- Index rows() { return m_mapL.rows(); }
- Index cols() { return m_mapL.cols(); }
+ Index rows() const { return m_mapL.rows(); }
+ Index cols() const { return m_mapL.cols(); }
template<typename Dest>
void solveInPlace( MatrixBase<Dest> &X) const
{
m_mapL.solveInPlace(X);
}
+ template<bool Conjugate, typename Dest>
+ void solveTransposedInPlace( MatrixBase<Dest> &X) const
+ {
+ m_mapL.template solveTransposedInPlace<Conjugate>(X);
+ }
+
const MappedSupernodalType& m_mapL;
};
@@ -723,8 +824,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
: m_mapL(mapL),m_mapU(mapU)
{ }
- Index rows() { return m_mapL.rows(); }
- Index cols() { return m_mapL.cols(); }
+ Index rows() const { return m_mapL.rows(); }
+ Index cols() const { return m_mapL.cols(); }
template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
{
@@ -747,8 +848,9 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
}
else
{
+ // FIXME: the following lines should use Block expressions and not Map!
Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
- Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X.coeffRef(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<Upper>().solve(U);
}
@@ -766,6 +868,52 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
}
} // End For U-solve
}
+
+ template<bool Conjugate, typename Dest> void solveTransposedInPlace(MatrixBase<Dest> &X) const
+ {
+ using numext::conj;
+ Index nrhs = X.cols();
+ Index n = X.rows();
+ // Forward solve with U
+ for (Index k = 0; k <= m_mapL.nsuper(); k++)
+ {
+ Index fsupc = m_mapL.supToCol()[k];
+ Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
+ Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
+ Index luptr = m_mapL.colIndexPtr()[fsupc];
+
+ for (Index j = 0; j < nrhs; ++j)
+ {
+ for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
+ {
+ typename MatrixUType::InnerIterator it(m_mapU, jcol);
+ for ( ; it; ++it)
+ {
+ Index irow = it.index();
+ X(jcol, j) -= X(irow, j) * (Conjugate? conj(it.value()): it.value());
+ }
+ }
+ }
+ if (nsupc == 1)
+ {
+ for (Index j = 0; j < nrhs; j++)
+ {
+ X(fsupc, j) /= (Conjugate? conj(m_mapL.valuePtr()[luptr]) : m_mapL.valuePtr()[luptr]);
+ }
+ }
+ else
+ {
+ Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ if(Conjugate)
+ U = A.adjoint().template triangularView<Lower>().solve(U);
+ else
+ U = A.transpose().template triangularView<Lower>().solve(U);
+ }
+ }// End For U-solve
+ }
+
+
const MatrixLType& m_mapL;
const MatrixUType& m_mapU;
};
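
Putting the new view classes together, one factorization now serves the direct, transposed, and adjoint systems. A compilable sketch (matrix entries illustrative):

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <Eigen/SparseLU>

int main()
{
    typedef Eigen::SparseMatrix<double> SpMat;
    SpMat A(3, 3);
    A.insert(0, 0) = 4; A.insert(1, 1) = 3;
    A.insert(2, 2) = 2; A.insert(0, 2) = 1;
    A.makeCompressed();

    Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > solver;
    solver.compute(A);                                  // factorize once

    Eigen::VectorXd b  = Eigen::VectorXd::Ones(3);
    Eigen::VectorXd x  = solver.solve(b);               // A   x = b
    Eigen::VectorXd xt = solver.transpose().solve(b);   // A^T x = b (new)
    Eigen::VectorXd xa = solver.adjoint().solve(b);     // A^* x = b (new)
    return 0;
}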
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_Memory.h b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_Memory.h
index 4dc42e87b..349bfd585 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -51,7 +51,7 @@ inline Index LUTempSpace(Index&m, Index& w)
/**
- * Expand the existing storage to accomodate more fill-ins
+ * Expand the existing storage to accommodate more fill-ins
* \param vec Valid pointer to the vector to allocate or expand
* \param[in,out] length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector
* \param[in] nbElts Current number of elements in the factors
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
index 721e1883b..0be293d17 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -75,12 +75,12 @@ class MappedSuperNodalMatrix
/**
* Number of rows
*/
- Index rows() { return m_row; }
+ Index rows() const { return m_row; }
/**
* Number of columns
*/
- Index cols() { return m_col; }
+ Index cols() const { return m_col; }
/**
* Return the array of nonzero values packed by column
@@ -156,6 +156,9 @@ class MappedSuperNodalMatrix
class InnerIterator;
template<typename Dest>
void solveInPlace( MatrixBase<Dest>&X) const;
+ template<bool Conjugate, typename Dest>
+ void solveTransposedInPlace( MatrixBase<Dest>&X) const;
+
@@ -294,6 +297,77 @@ void MappedSuperNodalMatrix<Scalar,Index_>::solveInPlace( MatrixBase<Dest>&X) co
}
}
+template<typename Scalar, typename Index_>
+template<bool Conjugate, typename Dest>
+void MappedSuperNodalMatrix<Scalar,Index_>::solveTransposedInPlace( MatrixBase<Dest>&X) const
+{
+ using numext::conj;
+ Index n = int(X.rows());
+ Index nrhs = Index(X.cols());
+ const Scalar * Lval = valuePtr(); // Nonzero values
+ Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor> work(n, nrhs); // working vector
+ work.setZero();
+ for (Index k = nsuper(); k >= 0; k--)
+ {
+ Index fsupc = supToCol()[k]; // First column of the current supernode
+ Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column
+ Index nsupr = rowIndexPtr()[fsupc+1] - istart; // Number of rows in the current supernode
+ Index nsupc = supToCol()[k+1] - fsupc; // Number of columns in the current supernode
+ Index nrow = nsupr - nsupc; // Number of rows in the non-diagonal part of the supernode
+ Index irow; //Current index row
+
+ if (nsupc == 1 )
+ {
+ for (Index j = 0; j < nrhs; j++)
+ {
+ InnerIterator it(*this, fsupc);
+ ++it; // Skip the diagonal element
+ for (; it; ++it)
+ {
+ irow = it.row();
+ X(fsupc,j) -= X(irow, j) * (Conjugate?conj(it.value()):it.value());
+ }
+ }
+ }
+ else
+ {
+ // The supernode has more than one column
+ Index luptr = colIndexPtr()[fsupc];
+ Index lda = colIndexPtr()[fsupc+1] - luptr;
+
+ //Begin Gather
+ for (Index j = 0; j < nrhs; j++)
+ {
+ Index iptr = istart + nsupc;
+ for (Index i = 0; i < nrow; i++)
+ {
+ irow = rowIndex()[iptr];
+ work.topRows(nrow)(i,j)= X(irow,j); // Gather operation
+ iptr++;
+ }
+ }
+
+ // Matrix-vector product with transposed submatrix
+ Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ if(Conjugate)
+ U = U - A.adjoint() * work.topRows(nrow);
+ else
+ U = U - A.transpose() * work.topRows(nrow);
+
+ // Triangular solve (of transposed diagonal block)
+ new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ if(Conjugate)
+ U = A.adjoint().template triangularView<UnitUpper>().solve(U);
+ else
+ U = A.transpose().template triangularView<UnitUpper>().solve(U);
+
+ }
+
+ }
+}
+
+
} // end namespace internal
} // end namespace Eigen
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_column_dfs.h b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_column_dfs.h
index c98b30e32..5a2c941b4 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_column_dfs.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -151,7 +151,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
StorageIndex ito = glu.xlsub(fsupc+1);
glu.xlsub(jcolm1) = ito;
StorageIndex istop = ito + jptr - jm1ptr;
- xprune(jcolm1) = istop; // intialize xprune(jcol-1)
+ xprune(jcolm1) = istop; // initialize xprune(jcol-1)
glu.xlsub(jcol) = istop;
for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
@@ -166,7 +166,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// Tidy up the pointers before exit
glu.xsup(nsuper+1) = jcolp1;
glu.supno(jcolp1) = nsuper;
- xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning
+ xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
glu.xlsub(jcolp1) = StorageIndex(nextl);
return 0;
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
index 95ba7413f..e37c2fe0d 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
@@ -215,7 +215,7 @@ void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0);
- // agressive vectorization and peeling
+ // aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_panel_bmod.h
index 822cf32c3..f052001c8 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_panel_bmod.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -38,7 +38,7 @@ namespace internal {
* \brief Performs numeric block updates (sup-panel) in topological order.
*
* Before entering this routine, the original nonzeros in the panel
- * were already copied i nto the spa[m,w]
+ * were already copied into the spa[m,w]
*
* \param m number of rows in the matrix
* \param w Panel size
diff --git a/examples/ThirdPartyLibs/Eigen/src/SparseQR/SparseQR.h b/examples/ThirdPartyLibs/Eigen/src/SparseQR/SparseQR.h
index f7111fe2e..d1fb96f5c 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SparseQR/SparseQR.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SparseQR/SparseQR.h
@@ -41,18 +41,19 @@ namespace internal {
/**
* \ingroup SparseQR_Module
* \class SparseQR
- * \brief Sparse left-looking rank-revealing QR factorization
+ * \brief Sparse left-looking QR factorization with numerical column pivoting
*
- * This class implements a left-looking rank-revealing QR decomposition
- * of sparse matrices. When a column has a norm less than a given tolerance
+ * This class implements a left-looking QR decomposition of sparse matrices
+ * with numerical column pivoting.
+ * When a column has a norm less than a given tolerance
* it is implicitly permuted to the end. The QR factorization thus obtained is
* given by A*P = Q*R where R is upper triangular or trapezoidal.
*
* P is the column permutation which is the product of the fill-reducing and the
- * rank-revealing permutations. Use colsPermutation() to get it.
+ * numerical permutations. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as products of Householder reflectors.
- * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
+ * Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint.
* You can then apply it to a vector.
*
* R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient.
@@ -64,7 +65,19 @@ namespace internal {
*
* \implsparsesolverconcept
*
+ * The numerical pivoting strategy and default threshold are the same as in SuiteSparse QR, and
+ * detailed in the following paper:
+ * <i>
+ * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
* Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011.
+ * </i>
* Even though it is qualified as "rank-revealing", this strategy might fail for some
* rank-deficient problems. When this class is used to solve linear or least-squares problems
* it is thus strongly recommended to check the accuracy of the computed solution. If it
* fails, it usually helps to increase the threshold with setPivotThreshold.
+ *
* \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
+ * \warning For complex matrices matrixQ().transpose() will actually return the adjoint matrix.
*
*/
template<typename _MatrixType, typename _OrderingType>
@@ -196,9 +209,9 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
Index rank = this->rank();
- // Compute Q^T * b;
+ // Compute Q^* * b;
typename Dest::PlainObject y, b;
- y = this->matrixQ().transpose() * B;
+ y = this->matrixQ().adjoint() * B;
b = y;
// Solve with the triangular matrix R
@@ -327,10 +340,10 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_isEtreeOk = true;
- m_R.resize(diagSize, n);
+ m_R.resize(m, n);
m_Q.resize(m, diagSize);
- // Allocate space for nonzero elements : rough estimation
+ // Allocate space for nonzero elements: rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros());
m_hcoeffs.resize(diagSize);
@@ -604,7 +617,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
// Get the references
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
m_qr(qr),m_other(other),m_transpose(transpose) {}
- inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
+ inline Index rows() const { return m_qr.matrixQ().rows(); }
inline Index cols() const { return m_other.cols(); }
// Assign to a vector
@@ -632,16 +645,20 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
}
else
{
- eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
+ eigen_assert(m_qr.matrixQ().cols() == m_other.rows() && "Non conforming object sizes");
+
+ res.conservativeResize(rows(), cols());
+
// Compute res = Q * other column by column
for(Index j = 0; j < res.cols(); j++)
{
- for (Index k = diagSize-1; k >=0; k--)
+ Index start_k = internal::is_identity<Derived>::value ? numext::mini(j,diagSize-1) : diagSize-1;
+ for (Index k = start_k; k >=0; k--)
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
if(tau==Scalar(0)) continue;
- tau = tau * m_qr.m_hcoeffs(k);
+ tau = tau * numext::conj(m_qr.m_hcoeffs(k));
res.col(j) -= tau * m_qr.m_Q.col(k);
}
}
@@ -650,7 +667,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
const SparseQRType& m_qr;
const Derived& m_other;
- bool m_transpose;
+ bool m_transpose; // TODO this actually means adjoint
};
template<typename SparseQRType>
@@ -668,13 +685,14 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
{
return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);
}
+ // To use for operations with the adjoint of Q
SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
}
inline Index rows() const { return m_qr.rows(); }
- inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
- // To use for operations with the transpose of Q
+ inline Index cols() const { return m_qr.rows(); }
+ // To use for operations with the transpose of Q FIXME this is the same as adjoint at the moment
SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
@@ -682,6 +700,7 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
const SparseQRType& m_qr;
};
+// TODO this actually represents the adjoint of Q
template<typename SparseQRType>
struct SparseQRMatrixQTransposeReturnType
{
@@ -712,7 +731,7 @@ struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal:
typedef typename DstXprType::StorageIndex StorageIndex;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
{
- typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());
+ typename DstXprType::PlainObject idMat(src.rows(), src.cols());
idMat.setIdentity();
// Sort the sparse householder reflectors if needed
const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();
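
Following the recommendation added to the docs above, a sketch of checking the residual and retrying with a larger pivot threshold (matrix entries and tolerances illustrative):

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <Eigen/SparseQR>

int main()
{
    typedef Eigen::SparseMatrix<double> SpMat;
    SpMat A(3, 3);
    A.insert(0, 0) = 2; A.insert(1, 1) = 1;
    A.insert(2, 2) = 3; A.insert(2, 0) = 1;
    A.makeCompressed();                        // SparseQR requires compressed input

    Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
    Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
    Eigen::VectorXd x = qr.solve(b);

    if ((A * x - b).norm() > 1e-10 * b.norm()) // accuracy check as recommended
    {
        qr.setPivotThreshold(1e-6);            // retry with a larger threshold
        qr.compute(A);
        x = qr.solve(b);
    }
    return 0;
}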
diff --git a/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdDeque.h b/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdDeque.h
index cf1fedf92..6d47e7572 100644
--- a/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdDeque.h
+++ b/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdDeque.h
@@ -36,7 +36,7 @@ namespace std \
deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
deque(const deque& c) : deque_base(c) {} \
explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
- deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque(iterator start_, iterator end_) : deque_base(start_, end_) {} \
deque& operator=(const deque& x) { \
deque_base::operator=(x); \
return *this; \
@@ -62,7 +62,7 @@ namespace std {
: deque_base(first, last, a) {} \
deque(const deque& c) : deque_base(c) {} \
explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
- deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque(iterator start_, iterator end_) : deque_base(start_, end_) {} \
deque& operator=(const deque& x) { \
deque_base::operator=(x); \
return *this; \
@@ -98,17 +98,7 @@ namespace std {
{ return deque_base::insert(position,x); }
void insert(const_iterator position, size_type new_size, const value_type& x)
{ deque_base::insert(position, new_size, x); }
-#elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2)
- // workaround GCC std::deque implementation
- void resize(size_type new_size, const value_type& x)
- {
- if (new_size < deque_base::size())
- deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
- else
- deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
- }
#else
- // either GCC 4.1 or non-GCC
// default implementation which should always work.
void resize(size_type new_size, const value_type& x)
{
diff --git a/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdList.h b/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdList.h
index e1eba4985..8ba3fada0 100644
--- a/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdList.h
+++ b/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdList.h
@@ -35,7 +35,7 @@ namespace std \
list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
list(const list& c) : list_base(c) {} \
explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
- list(iterator start, iterator end) : list_base(start, end) {} \
+ list(iterator start_, iterator end_) : list_base(start_, end_) {} \
list& operator=(const list& x) { \
list_base::operator=(x); \
return *this; \
@@ -62,7 +62,7 @@ namespace std
: list_base(first, last, a) {} \
list(const list& c) : list_base(c) {} \
explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
- list(iterator start, iterator end) : list_base(start, end) {} \
+ list(iterator start_, iterator end_) : list_base(start_, end_) {} \
list& operator=(const list& x) { \
list_base::operator=(x); \
return *this; \
diff --git a/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdVector.h b/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdVector.h
index ec22821d2..9fcf19bce 100644
--- a/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdVector.h
+++ b/examples/ThirdPartyLibs/Eigen/src/StlSupport/StdVector.h
@@ -36,7 +36,7 @@ namespace std \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
- vector(iterator start, iterator end) : vector_base(start, end) {} \
+ vector(iterator start_, iterator end_) : vector_base(start_, end_) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
@@ -62,7 +62,7 @@ namespace std {
: vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
- vector(iterator start, iterator end) : vector_base(start, end) {} \
+ vector(iterator start_, iterator end_) : vector_base(start_, end_) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
diff --git a/examples/ThirdPartyLibs/Eigen/src/SuperLUSupport/SuperLUSupport.h b/examples/ThirdPartyLibs/Eigen/src/SuperLUSupport/SuperLUSupport.h
index 50a69f306..d1d3ad7f1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ b/examples/ThirdPartyLibs/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -217,12 +217,12 @@ struct SluMatrix : SuperMatrix
res.setScalarType<typename MatrixType::Scalar>();
// FIXME the following is not very accurate
- if (MatrixType::Flags & Upper)
+ if (int(MatrixType::Flags) & int(Upper))
res.Mtype = SLU_TRU;
- if (MatrixType::Flags & Lower)
+ if (int(MatrixType::Flags) & int(Lower))
res.Mtype = SLU_TRL;
- eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+ eigen_assert(((int(MatrixType::Flags) & int(SelfAdjoint))==0) && "SelfAdjoint matrix shape not supported by SuperLU");
return res;
}
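The int() casts above exist because applying bitwise operators to two different enumeration types (here the anonymous Flags enum and Eigen's UpLoType) is deprecated in C++20 and warns on recent compilers. A minimal sketch of the pattern, using a hypothetical hasUpperFlag() helper that is not part of Eigen:

    #include <Eigen/Core>

    // Hypothetical helper illustrating the cast pattern used in SluMatrix.
    template <typename MatrixType>
    bool hasUpperFlag()
    {
      // Casting both operands to int avoids mixed-enum arithmetic warnings.
      return (int(MatrixType::Flags) & int(Eigen::Upper)) != 0;
    }

    int main()
    {
      return hasUpperFlag<Eigen::Matrix3d>() ? 0 : 1;
    }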
@@ -297,8 +297,8 @@ SluMatrix asSluMatrix(MatrixType& mat)
template<typename Scalar, int Flags, typename Index>
MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
{
- eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR
- || (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC);
+ eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+ || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC));
Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
@@ -352,7 +352,7 @@ class SuperLUBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
@@ -650,9 +650,8 @@ void SuperLU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
- const Index size = m_matrix.rows();
const Index rhsCols = b.cols();
- eigen_assert(size==b.rows());
+ eigen_assert(m_matrix.rows()==b.rows());
m_sluOptions.Trans = NOTRANS;
m_sluOptions.Fact = FACTORED;
@@ -974,9 +973,8 @@ void SuperILU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
- const int size = m_matrix.rows();
const int rhsCols = b.cols();
- eigen_assert(size==b.rows());
+ eigen_assert(m_matrix.rows()==b.rows());
m_sluOptions.Trans = NOTRANS;
m_sluOptions.Fact = FACTORED;
diff --git a/examples/ThirdPartyLibs/Eigen/src/UmfPackSupport/UmfPackSupport.h b/examples/ThirdPartyLibs/Eigen/src/UmfPackSupport/UmfPackSupport.h
index 9568cc1d5..e3a333f80 100644
--- a/examples/ThirdPartyLibs/Eigen/src/UmfPackSupport/UmfPackSupport.h
+++ b/examples/ThirdPartyLibs/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -10,6 +10,16 @@
#ifndef EIGEN_UMFPACKSUPPORT_H
#define EIGEN_UMFPACKSUPPORT_H
+// For compatibility with very old versions of UMFPACK;
+// this may no longer be needed, but it is harmless.
+#ifndef SuiteSparse_long
+#ifdef UF_long
+#define SuiteSparse_long UF_long
+#else
+#error neither SuiteSparse_long nor UF_long is defined
+#endif
+#endif
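The wrapper overloads below all rely on the same idiom: a trailing dummy argument whose type (int versus SuiteSparse_long) selects the umfpack_*i_* or umfpack_*l_* flavour at compile time, at zero runtime cost. A self-contained sketch of that dispatch pattern, with made-up backend names standing in for the real UMFPACK routines:

    #include <iostream>

    // Two "backends", distinguished only by the dummy argument's type.
    inline void backend_free(void** p, int)       { std::cout << "int backend\n";  *p = 0; }
    inline void backend_free(void** p, long long) { std::cout << "long backend\n"; *p = 0; }

    template <typename StorageIndex>
    void release(void** handle)
    {
      // The dummy argument's value is ignored; only its type matters.
      backend_free(handle, StorageIndex());
    }

    int main()
    {
      void* h = 0;
      release<int>(&h);        // picks the int overload
      release<long long>(&h);  // picks the long long overload
    }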
+
namespace Eigen {
/* TODO extract L, extract U, compute det, etc... */
@@ -17,42 +27,85 @@ namespace Eigen {
// generic double/complex<double> wrapper functions:
-inline void umfpack_defaults(double control[UMFPACK_CONTROL], double)
+// Defaults
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], double, int)
{ umfpack_di_defaults(control); }
-inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>)
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>, int)
{ umfpack_zi_defaults(control); }
-inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double)
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], double, SuiteSparse_long)
+{ umfpack_dl_defaults(control); }
+
+inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_defaults(control); }
+
+// Report info
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double, int)
{ umfpack_di_report_info(control, info);}
-inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>)
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>, int)
{ umfpack_zi_report_info(control, info);}
-inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double)
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double, SuiteSparse_long)
+{ umfpack_dl_report_info(control, info);}
+
+inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_report_info(control, info);}
+
+// Report status
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double, int)
{ umfpack_di_report_status(control, status);}
-inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>)
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>, int)
{ umfpack_zi_report_status(control, status);}
-inline void umfpack_report_control(double control[UMFPACK_CONTROL], double)
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double, SuiteSparse_long)
+{ umfpack_dl_report_status(control, status);}
+
+inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_report_status(control, status);}
+
+// Report control
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], double, int)
{ umfpack_di_report_control(control);}
-inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>)
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>, int)
{ umfpack_zi_report_control(control);}
-inline void umfpack_free_numeric(void **Numeric, double)
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], double, SuiteSparse_long)
+{ umfpack_dl_report_control(control);}
+
+inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_report_control(control);}
+
+// Free numeric
+inline void umfpack_free_numeric(void **Numeric, double, int)
{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }
-inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>, int)
{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }
-inline void umfpack_free_symbolic(void **Symbolic, double)
+inline void umfpack_free_numeric(void **Numeric, double, SuiteSparse_long)
+{ umfpack_dl_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_free_numeric(Numeric); *Numeric = 0; }
+
+// Free symbolic
+inline void umfpack_free_symbolic(void **Symbolic, double, int)
{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }
-inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>, int)
{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }
+inline void umfpack_free_symbolic(void **Symbolic, double, SuiteSparse_long)
+{ umfpack_dl_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>, SuiteSparse_long)
+{ umfpack_zl_free_symbolic(Symbolic); *Symbolic = 0; }
+
+// Symbolic
inline int umfpack_symbolic(int n_row,int n_col,
const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
@@ -66,7 +119,21 @@ inline int umfpack_symbolic(int n_row,int n_col,
{
return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
}
+inline SuiteSparse_long umfpack_symbolic( SuiteSparse_long n_row,SuiteSparse_long n_col,
+ const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const double Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_dl_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+inline SuiteSparse_long umfpack_symbolic( SuiteSparse_long n_row,SuiteSparse_long n_col,
+ const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const std::complex<double> Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_zl_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
+}
+
+// Numeric
inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
void *Symbolic, void **Numeric,
const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
@@ -80,7 +147,21 @@ inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<d
{
return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
}
+inline SuiteSparse_long umfpack_numeric(const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const double Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_dl_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+inline SuiteSparse_long umfpack_numeric(const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const std::complex<double> Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_zl_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+}
+
+// Solve
inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
double X[], const double B[], void *Numeric,
const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
@@ -95,6 +176,21 @@ inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::co
return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
}
+inline SuiteSparse_long umfpack_solve(int sys, const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const double Ax[],
+ double X[], const double B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_dl_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline SuiteSparse_long umfpack_solve(int sys, const SuiteSparse_long Ap[], const SuiteSparse_long Ai[], const std::complex<double> Ax[],
+ std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_zl_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
+}
+
+// Get Lunz
inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
{
return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
@@ -105,6 +201,19 @@ inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_
return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
}
+inline SuiteSparse_long umfpack_get_lunz( SuiteSparse_long *lnz, SuiteSparse_long *unz, SuiteSparse_long *n_row, SuiteSparse_long *n_col,
+ SuiteSparse_long *nz_udiag, void *Numeric, double)
+{
+ return umfpack_dl_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline SuiteSparse_long umfpack_get_lunz( SuiteSparse_long *lnz, SuiteSparse_long *unz, SuiteSparse_long *n_row, SuiteSparse_long *n_col,
+ SuiteSparse_long *nz_udiag, void *Numeric, std::complex<double>)
+{
+ return umfpack_zl_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+// Get Numeric
inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
{
@@ -120,18 +229,45 @@ inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], in
return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
}
+inline SuiteSparse_long umfpack_get_numeric(SuiteSparse_long Lp[], SuiteSparse_long Lj[], double Lx[], SuiteSparse_long Up[], SuiteSparse_long Ui[], double Ux[],
+ SuiteSparse_long P[], SuiteSparse_long Q[], double Dx[], SuiteSparse_long *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_dl_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
-inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+inline SuiteSparse_long umfpack_get_numeric(SuiteSparse_long Lp[], SuiteSparse_long Lj[], std::complex<double> Lx[], SuiteSparse_long Up[], SuiteSparse_long Ui[], std::complex<double> Ux[],
+ SuiteSparse_long P[], SuiteSparse_long Q[], std::complex<double> Dx[], SuiteSparse_long *do_recip, double Rs[], void *Numeric)
+{
+ double& lx0_real = numext::real_ref(Lx[0]);
+ double& ux0_real = numext::real_ref(Ux[0]);
+ double& dx0_real = numext::real_ref(Dx[0]);
+ return umfpack_zl_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
+ Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
+}
+
+// Get Determinant
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], int)
{
return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
}
-inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], int)
{
double& mx_real = numext::real_ref(*Mx);
return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
}
+inline SuiteSparse_long umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], SuiteSparse_long)
+{
+ return umfpack_dl_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline SuiteSparse_long umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO], SuiteSparse_long)
+{
+ double& mx_real = numext::real_ref(*Mx);
+ return umfpack_zl_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
+}
+
/** \ingroup UmfPackSupport_Module
* \brief A sparse LU factorization and solver based on UmfPack
@@ -164,7 +300,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef SparseMatrix<Scalar> LUMatrixType;
- typedef SparseMatrix<Scalar,ColMajor,int> UmfpackMatrixType;
+ typedef SparseMatrix<Scalar,ColMajor,StorageIndex> UmfpackMatrixType;
typedef Ref<const UmfpackMatrixType, StandardCompressedFormat> UmfpackMatrixRef;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
@@ -192,8 +328,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
~UmfPackLU()
{
- if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar(), StorageIndex());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar(), StorageIndex());
}
inline Index rows() const { return mp_matrix.rows(); }
@@ -201,7 +337,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
@@ -241,8 +377,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
template<typename InputMatrixType>
void compute(const InputMatrixType& matrix)
{
- if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar(),StorageIndex());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar(),StorageIndex());
grab(matrix.derived());
analyzePattern_impl();
factorize_impl();
@@ -257,8 +393,8 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
template<typename InputMatrixType>
void analyzePattern(const InputMatrixType& matrix)
{
- if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar(),StorageIndex());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar(),StorageIndex());
grab(matrix.derived());
@@ -309,7 +445,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
if(m_numeric)
- umfpack_free_numeric(&m_numeric,Scalar());
+ umfpack_free_numeric(&m_numeric,Scalar(),StorageIndex());
grab(matrix.derived());
@@ -322,7 +458,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
*/
void printUmfpackControl()
{
- umfpack_report_control(m_control.data(), Scalar());
+ umfpack_report_control(m_control.data(), Scalar(),StorageIndex());
}
/** Prints statistics collected by UmfPack.
@@ -332,7 +468,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
void printUmfpackInfo()
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
- umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar());
+ umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar(),StorageIndex());
}
/** Prints the status of the previous factorization operation performed by UmfPack (symbolic or numerical factorization).
@@ -341,7 +477,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
*/
void printUmfpackStatus() {
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
- umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar());
+ umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar(),StorageIndex());
}
/** \internal */
@@ -362,13 +498,13 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
m_symbolic = 0;
m_extractedDataAreDirty = true;
- umfpack_defaults(m_control.data(), Scalar());
+ umfpack_defaults(m_control.data(), Scalar(),StorageIndex());
}
void analyzePattern_impl()
{
- m_fact_errorCode = umfpack_symbolic(internal::convert_index<int>(mp_matrix.rows()),
- internal::convert_index<int>(mp_matrix.cols()),
+ m_fact_errorCode = umfpack_symbolic(internal::convert_index<StorageIndex>(mp_matrix.rows()),
+ internal::convert_index<StorageIndex>(mp_matrix.cols()),
mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
&m_symbolic, m_control.data(), m_umfpackInfo.data());
@@ -408,7 +544,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
// cached data to reduce reallocation, etc.
mutable LUMatrixType m_l;
- int m_fact_errorCode;
+ StorageIndex m_fact_errorCode;
UmfpackControl m_control;
mutable UmfpackInfo m_umfpackInfo;
@@ -438,7 +574,7 @@ void UmfPackLU<MatrixType>::extractData() const
if (m_extractedDataAreDirty)
{
// get size of the data
- int lnz, unz, rows, cols, nz_udiag;
+ StorageIndex lnz, unz, rows, cols, nz_udiag;
umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
// allocate data
@@ -464,7 +600,7 @@ template<typename MatrixType>
typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
{
Scalar det;
- umfpack_get_determinant(&det, 0, m_numeric, 0);
+ umfpack_get_determinant(&det, 0, m_numeric, 0, StorageIndex());
return det;
}
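Taken together, these changes make UmfPackLU follow the matrix's StorageIndex, so a SuiteSparse_long-indexed sparse matrix now dispatches to the umfpack_*l_* routines. A usage sketch, assuming Eigen is built with UMFPACK available; the SpMat alias is illustrative:

    #include <Eigen/SparseCore>
    #include <Eigen/UmfPackSupport>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, SuiteSparse_long> SpMat;
      SpMat A(2, 2);
      A.insert(0, 0) = 4.0;
      A.insert(1, 1) = 2.0;
      A.makeCompressed();

      Eigen::UmfPackLU<SpMat> lu(A);    // dispatches to the umfpack_dl_* flavour
      Eigen::VectorXd b(2);
      b << 8.0, 4.0;
      Eigen::VectorXd x = lu.solve(b);  // expected solution: (2, 2)
      return lu.info() == Eigen::Success ? 0 : 1;
    }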
@@ -477,7 +613,6 @@ bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBas
eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve");
- int errorCode;
Scalar* x_ptr = 0;
Matrix<Scalar,Dynamic,1> x_tmp;
if(x.innerStride()!=1)
@@ -489,9 +624,10 @@ bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBas
{
if(x.innerStride()==1)
x_ptr = &x.col(j).coeffRef(0);
- errorCode = umfpack_solve(UMFPACK_A,
- mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
- x_ptr, &b.const_cast_derived().col(j).coeffRef(0), m_numeric, m_control.data(), m_umfpackInfo.data());
+ StorageIndex errorCode = umfpack_solve(UMFPACK_A,
+ mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
+ x_ptr, &b.const_cast_derived().col(j).coeffRef(0),
+ m_numeric, m_control.data(), m_umfpackInfo.data());
if(x.innerStride()!=1)
x.col(j) = x_tmp;
if (errorCode!=0)
diff --git a/examples/ThirdPartyLibs/Eigen/src/misc/lapacke.h b/examples/ThirdPartyLibs/Eigen/src/misc/lapacke.h
index 3d8e24f5a..3d8e24f5a 100644..100755
--- a/examples/ThirdPartyLibs/Eigen/src/misc/lapacke.h
+++ b/examples/ThirdPartyLibs/Eigen/src/misc/lapacke.h
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseBinaryOps.h
index 1f8a531af..1b422e201 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseBinaryOps.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -30,15 +30,40 @@ operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
*
* \sa max()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(min,min)
+template <int NaNPropagation, typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+min
+#else
+(min)
+#endif
+(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+template <typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,PropagateFast>, const Derived, const OtherDerived>
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+min
+#else
+(min)
+#endif
+(const OtherDerived &other) const
+{
+ return (min<PropagateFast>)(other);
+}
/** \returns an expression of the coefficient-wise min of \c *this and scalar \a other
*
* \sa max()
*/
+template <int NaNPropagation>
EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived,
- const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,NaNPropagation>, const Derived,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
#ifdef EIGEN_PARSED_BY_DOXYGEN
min
#else
@@ -46,7 +71,20 @@ min
#endif
(const Scalar &other) const
{
- return (min)(Derived::PlainObject::Constant(rows(), cols(), other));
+ return (min<NaNPropagation>)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
+
+EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,PropagateFast>, const Derived,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+min
+#else
+(min)
+#endif
+(const Scalar &other) const
+{
+ return (min<PropagateFast>)(Derived::PlainObject::Constant(rows(), cols(), other));
}
/** \returns an expression of the coefficient-wise max of \c *this and \a other
@@ -56,14 +94,52 @@ min
*
* \sa min()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(max,max)
+template <int NaNPropagation, typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+max
+#else
+(max)
+#endif
+(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+template <typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,PropagateFast>, const Derived, const OtherDerived>
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+max
+#else
+(max)
+#endif
+(const OtherDerived &other) const
+{
+ return (max<PropagateFast>)(other);
+}
/** \returns an expression of the coefficient-wise max of \c *this and scalar \a other
*
* \sa min()
*/
+template <int NaNPropagation>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,NaNPropagation>, const Derived,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+max
+#else
+(max)
+#endif
+(const Scalar &other) const
+{
+ return (max<NaNPropagation>)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
+
EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived,
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,PropagateFast>, const Derived,
const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
#ifdef EIGEN_PARSED_BY_DOXYGEN
max
@@ -72,7 +148,33 @@ max
#endif
(const Scalar &other) const
{
- return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
+ return (max<PropagateFast>)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
+
+/** \returns an expression of the coefficient-wise absolute difference of \c *this and \a other
+ *
+ * Example: \include Cwise_absolute_difference.cpp
+ * Output: \verbinclude Cwise_absolute_difference.out
+ *
+ * \sa absolute_difference()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(absolute_difference,absolute_difference)
+
+/** \returns an expression of the coefficient-wise absolute_difference of \c *this and scalar \a other
+ *
+ * \sa absolute_difference()
+ */
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_absolute_difference_op<Scalar,Scalar>, const Derived,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+absolute_difference
+#else
+(absolute_difference)
+#endif
+(const Scalar &other) const
+{
+ return (absolute_difference)(Derived::PlainObject::Constant(rows(), cols(), other));
}
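A usage sketch for the new NaN-propagation template parameter and absolute_difference(), assuming the Eigen 3.4 enumerators Eigen::PropagateFast, Eigen::PropagateNaN and Eigen::PropagateNumbers:

    #include <Eigen/Core>
    #include <iostream>
    #include <limits>

    int main()
    {
      Eigen::ArrayXd a(3), b(3);
      const double nan = std::numeric_limits<double>::quiet_NaN();
      a << 1.0, nan, 3.0;
      b << 2.0, 5.0, 1.0;

      // Default behaviour (PropagateFast): no guarantee which operand wins on NaN.
      std::cout << a.min(b).transpose() << "\n";
      // PropagateNaN: a NaN in either operand yields NaN.
      std::cout << a.min<Eigen::PropagateNaN>(b).transpose() << "\n";
      // PropagateNumbers: the non-NaN operand wins.
      std::cout << a.max<Eigen::PropagateNumbers>(b).transpose() << "\n";
      // Coefficient-wise |a - b|.
      std::cout << a.absolute_difference(b).transpose() << "\n";
    }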
/** \returns an expression of the coefficient-wise power of \c *this to the given array of \a exponents.
@@ -119,7 +221,7 @@ OP(const Scalar& s) const { \
return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
} \
EIGEN_DEVICE_FUNC friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
-OP(const Scalar& s, const Derived& d) { \
+OP(const Scalar& s, const EIGEN_CURRENT_STORAGE_BASE_CLASS<Derived>& d) { \
return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
}
@@ -314,9 +416,9 @@ polygamma(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedN> &n) const
*
* It returns the Riemann zeta function of two arguments \c *this and \a q:
*
- * \param *this is the exposent, it must be > 1
* \param q is the shift, it must be > 0
*
+ * \note *this is the exponent; it must be > 1.
* \note This function supports only float and double scalar types. To support other scalar types, the user has
* to provide implementations of zeta(T,T) for any scalar type T to be supported.
*
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index 43615bd56..13c55f4b1 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -14,6 +14,7 @@ typedef CwiseUnaryOp<internal::scalar_expm1_op<Scalar>, const Derived> Expm1Retu
typedef CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived> LogReturnType;
typedef CwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived> Log1pReturnType;
typedef CwiseUnaryOp<internal::scalar_log10_op<Scalar>, const Derived> Log10ReturnType;
+typedef CwiseUnaryOp<internal::scalar_log2_op<Scalar>, const Derived> Log2ReturnType;
typedef CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived> CosReturnType;
typedef CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived> SinReturnType;
typedef CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived> TanReturnType;
@@ -21,11 +22,18 @@ typedef CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived> AcosReturn
typedef CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived> AsinReturnType;
typedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturnType;
typedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType;
+typedef CwiseUnaryOp<internal::scalar_logistic_op<Scalar>, const Derived> LogisticReturnType;
typedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType;
+#if EIGEN_HAS_CXX11_MATH
+typedef CwiseUnaryOp<internal::scalar_atanh_op<Scalar>, const Derived> AtanhReturnType;
+typedef CwiseUnaryOp<internal::scalar_asinh_op<Scalar>, const Derived> AsinhReturnType;
+typedef CwiseUnaryOp<internal::scalar_acosh_op<Scalar>, const Derived> AcoshReturnType;
+#endif
typedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType;
typedef CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType;
typedef CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived> CubeReturnType;
typedef CwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived> RoundReturnType;
+typedef CwiseUnaryOp<internal::scalar_rint_op<Scalar>, const Derived> RintReturnType;
typedef CwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived> FloorReturnType;
typedef CwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived> CeilReturnType;
typedef CwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived> IsNaNReturnType;
@@ -152,6 +160,18 @@ log10() const
return Log10ReturnType(derived());
}
+/** \returns an expression of the coefficient-wise base-2 logarithm of *this.
+ *
+ * This function computes the coefficient-wise base-2 logarithm.
+ *
+ */
+EIGEN_DEVICE_FUNC
+inline const Log2ReturnType
+log2() const
+{
+ return Log2ReturnType(derived());
+}
+
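A short sketch: log2() joins log() and log10() as a coefficient-wise logarithm.

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd x(3);
      x << 1.0, 8.0, 1024.0;
      std::cout << x.log2().transpose() << "\n";  // 0 3 10
    }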
/** \returns an expression of the coefficient-wise square root of *this.
*
* This function computes the coefficient-wise square root. The function MatrixBase::sqrt() in the
@@ -326,7 +346,7 @@ sinh() const
* Example: \include Cwise_cosh.cpp
* Output: \verbinclude Cwise_cosh.out
*
- * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cosh">Math functions</a>, tan(), sinh(), cosh()
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cosh">Math functions</a>, tanh(), sinh(), cosh()
*/
EIGEN_DEVICE_FUNC
inline const CoshReturnType
@@ -335,6 +355,50 @@ cosh() const
return CoshReturnType(derived());
}
+#if EIGEN_HAS_CXX11_MATH
+/** \returns an expression of the coefficient-wise inverse hyperbolic tangent of *this.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_atanh">Math functions</a>, atanh(), asinh(), acosh()
+ */
+EIGEN_DEVICE_FUNC
+inline const AtanhReturnType
+atanh() const
+{
+ return AtanhReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise inverse hyperbolic sine of *this.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_asinh">Math functions</a>, atanh(), asinh(), acosh()
+ */
+EIGEN_DEVICE_FUNC
+inline const AsinhReturnType
+asinh() const
+{
+ return AsinhReturnType(derived());
+}
+
+/** \returns an expression of the coefficient-wise inverse hyperbolic cosine of *this.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_acosh">Math functions</a>, atanh(), asinh(), acosh()
+ */
+EIGEN_DEVICE_FUNC
+inline const AcoshReturnType
+acosh() const
+{
+ return AcoshReturnType(derived());
+}
+#endif
+
+/** \returns an expression of the coefficient-wise logistic function of *this, i.e. 1/(1+exp(-x)).
+ */
+EIGEN_DEVICE_FUNC
+inline const LogisticReturnType
+logistic() const
+{
+ return LogisticReturnType(derived());
+}
+
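A sketch of the new inverse hyperbolic and logistic coefficient-wise operations; note that atanh()/asinh()/acosh() are guarded by EIGEN_HAS_CXX11_MATH, as the block above shows:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd x(3);
      x << -1.0, 0.0, 1.0;
      // logistic(x) = 1 / (1 + exp(-x)), applied coefficient-wise.
      std::cout << x.logistic().transpose() << "\n";                  // ~0.269 0.5 0.731
      std::cout << (1.0 + (-x).exp()).inverse().transpose() << "\n";  // same values
      // Inverse hyperbolic functions (require C++11 math support).
      std::cout << x.tanh().atanh().transpose() << "\n";              // recovers x
    }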
/** \returns an expression of the coefficient-wise inverse of *this.
*
* Example: \include Cwise_inverse.cpp
@@ -377,6 +441,20 @@ cube() const
return CubeReturnType(derived());
}
+/** \returns an expression of the coefficient-wise rint of *this.
+ *
+ * Example: \include Cwise_rint.cpp
+ * Output: \verbinclude Cwise_rint.out
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_rint">Math functions</a>, ceil(), floor()
+ */
+EIGEN_DEVICE_FUNC
+inline const RintReturnType
+rint() const
+{
+ return RintReturnType(derived());
+}
+
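A sketch contrasting rint(), which rounds according to the current floating-point rounding mode (round-to-nearest-even by default), with round(), which rounds halves away from zero:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd x(4);
      x << 0.5, 1.5, 2.5, -0.5;
      std::cout << x.rint().transpose() << "\n";   // 0 2 2 -0 under round-to-nearest-even
      std::cout << x.round().transpose() << "\n";  // 1 2 3 -1 (halves away from zero)
    }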
/** \returns an expression of the coefficient-wise round of *this.
*
* Example: \include Cwise_round.cpp
@@ -419,6 +497,45 @@ ceil() const
return CeilReturnType(derived());
}
+template<int N> struct ShiftRightXpr {
+ typedef CwiseUnaryOp<internal::scalar_shift_right_op<Scalar, N>, const Derived> Type;
+};
+
+/** \returns an expression of \c *this with each coefficient arithmetically
+ * shifted right by \a N bit positions.
+ *
+ * The template parameter \a N specifies the number of bit positions to shift.
+ *
+ * \sa shiftLeft()
+ */
+template<int N>
+EIGEN_DEVICE_FUNC
+typename ShiftRightXpr<N>::Type
+shiftRight() const
+{
+ return typename ShiftRightXpr<N>::Type(derived());
+}
+
+
+template<int N> struct ShiftLeftXpr {
+ typedef CwiseUnaryOp<internal::scalar_shift_left_op<Scalar, N>, const Derived> Type;
+};
+
+/** \returns an expression of \c *this with each coefficient logically
+ * shifted left by \a N bit positions.
+ *
+ * The template parameter \a N specifies the number of bit positions to shift.
+ *
+ * \sa shiftRight()
+ */
+template<int N>
+EIGEN_DEVICE_FUNC
+typename ShiftLeftXpr<N>::Type
+shiftLeft() const
+{
+ return typename ShiftLeftXpr<N>::Type(derived());
+}
+
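A short sketch of the compile-time bit shifts on an integer array; the shift amount is a template parameter, so it is fixed at compile time:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXi v(3);
      v << 8, -8, 6;
      std::cout << v.shiftRight<1>().transpose() << "\n";  // 4 -4 3 (sign preserved)

      Eigen::ArrayXi w(3);
      w << 1, 2, 3;
      std::cout << w.shiftLeft<3>().transpose() << "\n";   // 8 16 24
    }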
/** \returns an expression of the coefficient-wise isnan of *this.
*
* Example: \include Cwise_isNaN.cpp
@@ -486,14 +603,12 @@ typedef CwiseUnaryOp<internal::scalar_lgamma_op<Scalar>, const Derived> LgammaRe
typedef CwiseUnaryOp<internal::scalar_digamma_op<Scalar>, const Derived> DigammaReturnType;
typedef CwiseUnaryOp<internal::scalar_erf_op<Scalar>, const Derived> ErfReturnType;
typedef CwiseUnaryOp<internal::scalar_erfc_op<Scalar>, const Derived> ErfcReturnType;
+typedef CwiseUnaryOp<internal::scalar_ndtri_op<Scalar>, const Derived> NdtriReturnType;
/** \cpp11 \returns an expression of the coefficient-wise ln(|gamma(*this)|).
*
* \specialfunctions_module
*
- * Example: \include Cwise_lgamma.cpp
- * Output: \verbinclude Cwise_lgamma.out
- *
* \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
* or float/double in non c++11 mode, the user has to provide implementations of lgamma(T) for any scalar
* type T to be supported.
@@ -529,9 +644,6 @@ digamma() const
*
* \specialfunctions_module
*
- * Example: \include Cwise_erf.cpp
- * Output: \verbinclude Cwise_erf.out
- *
* \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
* or float/double in non c++11 mode, the user has to provide implementations of erf(T) for any scalar
* type T to be supported.
@@ -550,9 +662,6 @@ erf() const
*
* \specialfunctions_module
*
- * Example: \include Cwise_erfc.cpp
- * Output: \verbinclude Cwise_erfc.out
- *
* \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
* or float/double in non c++11 mode, the user has to provide implementations of erfc(T) for any scalar
* type T to be supported.
@@ -565,3 +674,23 @@ erfc() const
{
return ErfcReturnType(derived());
}
+
+/** \returns an expression of the coefficient-wise inverse of the CDF of the Normal distribution
+ * function of *this.
+ *
+ * \specialfunctions_module
+ *
+ * In other words, considering `x = ndtri(y)`, it returns the argument, x, for which the area under the
+ * Gaussian probability density function (integrated from minus infinity to x) is equal to y.
+ *
+ * \note This function supports only float and double scalar types. To support other scalar types,
+ * the user has to provide implementations of ndtri(T) for any scalar type T to be supported.
+ *
+ * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_ndtri">Math functions</a>
+ */
+EIGEN_DEVICE_FUNC
+inline const NdtriReturnType
+ndtri() const
+{
+ return NdtriReturnType(derived());
+}
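A usage sketch: ndtri() is the quantile (inverse CDF) of the standard normal distribution, so ndtri(0.5) == 0. The include path is an assumption, based on the \specialfunctions_module tag above:

    #include <Eigen/Core>
    #include <unsupported/Eigen/SpecialFunctions>  // assumed home of the ndtri functor
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd p(3);
      p << 0.025, 0.5, 0.975;
      std::cout << p.ndtri().transpose() << "\n";  // approx -1.96 0 1.96
    }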
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/BlockMethods.h b/examples/ThirdPartyLibs/Eigen/src/plugins/BlockMethods.h
index 5caf14469..63a52a6ff 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/BlockMethods.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/BlockMethods.h
@@ -40,6 +40,14 @@ typedef const VectorBlock<const Derived> ConstSegmentReturnType;
template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
+/// \internal inner-vector
+typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
+typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
+
+/// \internal set of inner-vectors
+typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
+typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
+
#endif // not EIGEN_PARSED_BY_DOXYGEN
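A sketch of what these typedefs back: an inner vector is a column of a column-major matrix and a row of a row-major one. This assumes the innerVector()/innerVectors() accessors defined elsewhere in BlockMethods.h:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix<double, 3, 3, Eigen::ColMajor> m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;
      // For a column-major matrix the inner vectors are its columns.
      std::cout << m.innerVector(1).transpose() << "\n";  // 2 5 8
      std::cout << m.innerVectors(0, 2) << "\n";          // first two columns
    }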
/// \returns an expression of a block in \c *this with either dynamic or fixed sizes.
@@ -79,11 +87,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block, fix, fix<N>(int)
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols)
{
@@ -93,11 +101,11 @@ block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols)
/// This is the const version of block(Index,Index,NRowsType,NColsType)
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
block(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols) const
{
@@ -125,11 +133,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
topRightCorner(NRowsType cRows, NColsType cCols)
{
@@ -139,11 +147,11 @@ topRightCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of topRightCorner(NRowsType, NColsType).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
topRightCorner(NRowsType cRows, NColsType cCols) const
{
@@ -164,16 +172,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block, block<int,int>(Index,Index)
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topRightCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);
}
/// This is the const version of topRightCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);
}
@@ -198,14 +206,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);
}
/// This is the const version of topRightCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);
}
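A sketch of the three equivalent ways to take a corner block that the overloads above provide: runtime sizes, fully fixed sizes, and the Eigen::fix<N> hybrid (the fix<N> form shown assumes C++14):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
      auto a = m.topRightCorner(2, 2);              // runtime sizes
      auto b = m.topRightCorner<2, 2>();            // compile-time sizes
      auto c = m.topRightCorner(Eigen::fix<2>, 2);  // mixed, via Eigen::fix<N>
      std::cout << ((a - b).norm() + (a - c).norm()) << "\n";  // prints 0
    }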
@@ -230,11 +240,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
topLeftCorner(NRowsType cRows, NColsType cCols)
{
@@ -244,11 +254,11 @@ topLeftCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of topLeftCorner(Index, Index).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
topLeftCorner(NRowsType cRows, NColsType cCols) const
{
@@ -268,16 +278,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);
}
/// This is the const version of topLeftCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);
}
@@ -302,14 +312,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);
}
/// This is the const version of topLeftCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);
}
@@ -334,11 +346,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
bottomRightCorner(NRowsType cRows, NColsType cCols)
{
@@ -349,11 +361,11 @@ bottomRightCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of bottomRightCorner(NRowsType, NColsType).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstFixedBlockXpr<...,...>::Type
+const typename ConstFixedBlockXpr<...,...>::Type
#endif
bottomRightCorner(NRowsType cRows, NColsType cCols) const
{
@@ -374,16 +386,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);
}
/// This is the const version of bottomRightCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);
}
@@ -408,14 +420,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
/// This is the const version of bottomRightCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
@@ -440,11 +454,11 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename FixedBlockXpr<...,...>::Type
+typename FixedBlockXpr<...,...>::Type
#endif
bottomLeftCorner(NRowsType cRows, NColsType cCols)
{
@@ -455,11 +469,11 @@ bottomLeftCorner(NRowsType cRows, NColsType cCols)
/// This is the const version of bottomLeftCorner(NRowsType, NColsType).
template<typename NRowsType, typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
+typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename ConstFixedBlockXpr<...,...>::Type
+typename ConstFixedBlockXpr<...,...>::Type
#endif
bottomLeftCorner(NRowsType cRows, NColsType cCols) const
{
@@ -480,16 +494,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);
}
/// This is the const version of bottomLeftCorner<int, int>().
template<int CRows, int CCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);
}
@@ -514,14 +528,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa class Block
///
template<int CRows, int CCols>
-inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols)
+EIGEN_STRONG_INLINE
+typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);
}
/// This is the const version of bottomLeftCorner<int, int>(Index, Index).
template<int CRows, int CCols>
-inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const
+EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);
}
@@ -545,11 +561,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline typename NRowsBlockXpr<...>::Type
+typename NRowsBlockXpr<...>::Type
#endif
topRows(NRowsType n)
{
@@ -559,11 +575,11 @@ topRows(NRowsType n)
/// This is the const version of topRows(NRowsType).
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline const typename ConstNRowsBlockXpr<...>::Type
+const typename ConstNRowsBlockXpr<...>::Type
#endif
topRows(NRowsType n) const
{
@@ -587,16 +603,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NRowsBlockXpr<N>::Type topRows(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NRowsBlockXpr<N>::Type topRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
/// This is the const version of topRows<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
@@ -620,11 +636,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline typename NRowsBlockXpr<...>::Type
+typename NRowsBlockXpr<...>::Type
#endif
bottomRows(NRowsType n)
{
@@ -634,11 +650,11 @@ bottomRows(NRowsType n)
/// This is the const version of bottomRows(NRowsType).
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline const typename ConstNRowsBlockXpr<...>::Type
+const typename ConstNRowsBlockXpr<...>::Type
#endif
bottomRows(NRowsType n) const
{
@@ -662,16 +678,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
/// This is the const version of bottomRows<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
@@ -696,11 +712,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline typename NRowsBlockXpr<...>::Type
+typename NRowsBlockXpr<...>::Type
#endif
middleRows(Index startRow, NRowsType n)
{
@@ -710,11 +726,11 @@ middleRows(Index startRow, NRowsType n)
/// This is the const version of middleRows(Index,NRowsType).
template<typename NRowsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
+const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type
#else
-inline const typename ConstNRowsBlockXpr<...>::Type
+const typename ConstNRowsBlockXpr<...>::Type
#endif
middleRows(Index startRow, NRowsType n) const
{
@@ -739,16 +755,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
/// This is the const version of middleRows<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
@@ -772,11 +788,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename NColsBlockXpr<...>::Type
+typename NColsBlockXpr<...>::Type
#endif
leftCols(NColsType n)
{
@@ -786,11 +802,11 @@ leftCols(NColsType n)
/// This is the const version of leftCols(NColsType).
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstNColsBlockXpr<...>::Type
+const typename ConstNColsBlockXpr<...>::Type
#endif
leftCols(NColsType n) const
{
@@ -814,16 +830,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NColsBlockXpr<N>::Type leftCols(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NColsBlockXpr<N>::Type leftCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
/// This is the const version of leftCols<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
@@ -847,11 +863,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename NColsBlockXpr<...>::Type
+typename NColsBlockXpr<...>::Type
#endif
rightCols(NColsType n)
{
@@ -861,11 +877,11 @@ rightCols(NColsType n)
/// This is the const version of rightCols(NColsType).
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstNColsBlockXpr<...>::Type
+const typename ConstNColsBlockXpr<...>::Type
#endif
rightCols(NColsType n) const
{
@@ -889,16 +905,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NColsBlockXpr<N>::Type rightCols(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NColsBlockXpr<N>::Type rightCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
/// This is the const version of rightCols<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
@@ -923,11 +939,11 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline typename NColsBlockXpr<...>::Type
+typename NColsBlockXpr<...>::Type
#endif
middleCols(Index startCol, NColsType numCols)
{
@@ -937,11 +953,11 @@ middleCols(Index startCol, NColsType numCols)
/// This is the const version of middleCols(Index,NColsType).
template<typename NColsType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
+const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type
#else
-inline const typename ConstNColsBlockXpr<...>::Type
+const typename ConstNColsBlockXpr<...>::Type
#endif
middleCols(Index startCol, NColsType numCols) const
{
@@ -966,16 +982,16 @@ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
/// This is the const version of middleCols<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
@@ -1007,16 +1023,16 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int NRows, int NCols>
-EIGEN_DEVICE_FUNC
-inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol)
{
return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);
}
/// This is the const version of block<>(Index, Index).
template<int NRows, int NCols>
-EIGEN_DEVICE_FUNC
-inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const
{
return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);
}
@@ -1036,7 +1052,7 @@ inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow
/// \a NRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp
-/// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.cpp
+/// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.out
///
/// \note The usage of this overload is discouraged from %Eigen 3.4; better use the generic
/// block(Index,Index,NRowsType,NColsType). Here is the one-to-one complete equivalence:
@@ -1053,7 +1069,8 @@ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
/// \sa block(Index,Index,NRowsType,NColsType), class Block
///
template<int NRows, int NCols>
-inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols)
{
return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);
@@ -1061,7 +1078,8 @@ inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index sta
/// This is the const version of block<>(Index, Index, Index, Index).
template<int NRows, int NCols>
-inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols) const
{
return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);
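
A sketch of the one-to-one equivalence promised by the \note above, between the discouraged fixed-size overload with redundant runtime sizes and the generic block(Index,Index,NRowsType,NColsType) (matrix A hypothetical):

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 8);

  // Discouraged from Eigen 3.4: fixed/dynamic sizes passed twice.
  auto b1 = A.block<3, Eigen::Dynamic>(1, 1, 3, 4);

  // Recommended generic equivalent; returns the same expression type.
  auto b2 = A.block(1, 1, Eigen::fix<3>, 4);
  (void)b1; (void)b2;
  return 0;
}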
@@ -1075,15 +1093,15 @@ inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/**
* \sa row(), class Block */
-EIGEN_DEVICE_FUNC
-inline ColXpr col(Index i)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ColXpr col(Index i)
{
return ColXpr(derived(), i);
}
/// This is the const version of col().
-EIGEN_DEVICE_FUNC
-inline ConstColXpr col(Index i) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ConstColXpr col(Index i) const
{
return ConstColXpr(derived(), i);
}
@@ -1096,15 +1114,15 @@ inline ConstColXpr col(Index i) const
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/**
* \sa col(), class Block */
-EIGEN_DEVICE_FUNC
-inline RowXpr row(Index i)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+RowXpr row(Index i)
{
return RowXpr(derived(), i);
}
/// This is the const version of row().
-EIGEN_DEVICE_FUNC
-inline ConstRowXpr row(Index i) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ConstRowXpr row(Index i) const
{
return ConstRowXpr(derived(), i);
}
@@ -1131,11 +1149,11 @@ inline ConstRowXpr row(Index i) const
/// \sa block(Index,Index,NRowsType,NColsType), fix<N>, fix<N>(int), class Block
///
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline typename FixedSegmentReturnType<...>::Type
+typename FixedSegmentReturnType<...>::Type
#endif
segment(Index start, NType n)
{
@@ -1147,11 +1165,11 @@ segment(Index start, NType n)
/// This is the const version of segment(Index,NType).
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline const typename ConstFixedSegmentReturnType<...>::Type
+const typename ConstFixedSegmentReturnType<...>::Type
#endif
segment(Index start, NType n) const
{
@@ -1181,11 +1199,11 @@ segment(Index start, NType n) const
/// \sa class Block, block(Index,Index)
///
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline typename FixedSegmentReturnType<...>::Type
+typename FixedSegmentReturnType<...>::Type
#endif
head(NType n)
{
@@ -1196,11 +1214,11 @@ head(NType n)
/// This is the const version of head(NType).
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline const typename ConstFixedSegmentReturnType<...>::Type
+const typename ConstFixedSegmentReturnType<...>::Type
#endif
head(NType n) const
{
@@ -1230,11 +1248,11 @@ head(NType n) const
/// \sa class Block, block(Index,Index)
///
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline typename FixedSegmentReturnType<...>::Type
+typename FixedSegmentReturnType<...>::Type
#endif
tail(NType n)
{
@@ -1245,11 +1263,11 @@ tail(NType n)
/// This is the const version of tail(NType).
template<typename NType>
-EIGEN_DEVICE_FUNC
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
#ifndef EIGEN_PARSED_BY_DOXYGEN
-inline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
+const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type
#else
-inline const typename ConstFixedSegmentReturnType<...>::Type
+const typename ConstFixedSegmentReturnType<...>::Type
#endif
tail(NType n) const
{
@@ -1275,8 +1293,8 @@ tail(NType n) const
/// \sa segment(Index,NType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), start, n);
@@ -1284,8 +1302,8 @@ inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N
/// This is the const version of segment<int>(Index).
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);
@@ -1307,8 +1325,8 @@ inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index
/// \sa head(NType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedSegmentReturnType<N>::Type head(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);
@@ -1316,8 +1334,8 @@ inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
/// This is the const version of head<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);
@@ -1339,8 +1357,8 @@ inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
/// \sa tail(NType), class Block
///
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename FixedSegmentReturnType<N>::Type tail(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), size() - n);
@@ -1348,9 +1366,77 @@ inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
/// This is the const version of tail<int>().
template<int N>
-EIGEN_DEVICE_FUNC
-inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
}
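
The vector-only segment/head/tail methods follow the same fixed/runtime duality as the block methods. A minimal sketch, assuming a dynamic vector v:

#include <Eigen/Dense>

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(8, 0.0, 7.0);

  auto h = v.head<3>();            // first 3 coefficients, fixed size
  auto t = v.tail(Eigen::fix<2>);  // last 2, equivalent fix<> spelling
  auto s = v.segment<4>(2);        // 4 coefficients starting at index 2

  v.segment(1, 3).setConstant(-1.0);  // runtime-sized, writable
  (void)h; (void)t; (void)s;
  return 0;
}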
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major).
+///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+InnerVectorReturnType innerVector(Index outer)
+{ return InnerVectorReturnType(derived(), outer); }
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major). Read-only.
+///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const ConstInnerVectorReturnType innerVector(Index outer) const
+{ return ConstInnerVectorReturnType(derived(), outer); }
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major).
+///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+InnerVectorsReturnType
+innerVectors(Index outerStart, Index outerSize)
+{
+ return Block<Derived,Dynamic,Dynamic,true>(derived(),
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
+}
+
+/// \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+/// is col-major (resp. row-major). Read-only.
+///
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const ConstInnerVectorsReturnType
+innerVectors(Index outerStart, Index outerSize) const
+{
+ return Block<const Derived,Dynamic,Dynamic,true>(derived(),
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
+}
+
+/** \returns the i-th subvector (column or row) according to the \c Direction
+ * \sa subVectors()
+ */
+template<DirectionType Direction>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename internal::conditional<Direction==Vertical,ColXpr,RowXpr>::type
+subVector(Index i)
+{
+ return typename internal::conditional<Direction==Vertical,ColXpr,RowXpr>::type(derived(),i);
+}
+
+/** This is the const version of subVector(Index) */
+template<DirectionType Direction>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+typename internal::conditional<Direction==Vertical,ConstColXpr,ConstRowXpr>::type
+subVector(Index i) const
+{
+ return typename internal::conditional<Direction==Vertical,ConstColXpr,ConstRowXpr>::type(derived(),i);
+}
+
+/** \returns the number of subvectors (rows or columns) in the direction \c Direction
+ * \sa subVector(Index)
+ */
+template<DirectionType Direction>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+Index subVectors() const
+{ return (Direction==Vertical)?cols():rows(); }
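
The newly added innerVector/innerVectors/subVector/subVectors methods provide storage-order-aware access: inner vectors are columns of a column-major expression and rows of a row-major one. A sketch of the intended semantics (names are hypothetical):

#include <Eigen/Dense>

int main() {
  Eigen::Matrix<double, 3, 4> A;  // column-major by default
  A.setRandom();

  auto iv  = A.innerVector(1);      // column 1 of this col-major matrix
  auto ivs = A.innerVectors(1, 2);  // columns 1..2 as a single block

  auto c = A.subVector<Eigen::Vertical>(0);    // column 0
  auto r = A.subVector<Eigen::Horizontal>(2);  // row 2
  Eigen::Index n = A.subVectors<Eigen::Vertical>();  // == A.cols()
  (void)iv; (void)ivs; (void)c; (void)r; (void)n;
  return 0;
}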
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/CommonCwiseUnaryOps.h b/examples/ThirdPartyLibs/Eigen/src/plugins/CommonCwiseUnaryOps.h
index 89f4faaac..5418dc415 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/CommonCwiseUnaryOps.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/CommonCwiseUnaryOps.h
@@ -76,6 +76,20 @@ conjugate() const
return ConjugateReturnType(derived());
}
+/// \returns an expression of the complex conjugate of \c *this if Cond==true, returns derived() otherwise.
+///
+EIGEN_DOC_UNARY_ADDONS(conjugate,complex conjugate)
+///
+/// \sa conjugate()
+template<bool Cond>
+EIGEN_DEVICE_FUNC
+inline typename internal::conditional<Cond,ConjugateReturnType,const Derived&>::type
+conjugateIf() const
+{
+ typedef typename internal::conditional<Cond,ConjugateReturnType,const Derived&>::type ReturnType;
+ return ReturnType(derived());
+}
+
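conjugateIf<Cond> lets generic code conjugate only when a compile-time flag requests it, returning the derived object untouched otherwise. A sketch with a hypothetical helper maybeConj (C++14 for the deduced return type):

#include <Eigen/Dense>
#include <complex>

template <bool Conj, typename Derived>
auto maybeConj(const Eigen::MatrixBase<Derived>& m) {
  // Cond==false yields const Derived& inside Eigen; auto copies it here.
  return m.template conjugateIf<Conj>();
}

int main() {
  Eigen::Matrix2cd M;
  M << std::complex<double>(1, 2), std::complex<double>(0, 1),
       std::complex<double>(3, 0), std::complex<double>(0, -4);

  auto a = maybeConj<true>(M);   // conjugated expression
  auto b = maybeConj<false>(M);  // M passed through unchanged
  (void)a; (void)b;
  return 0;
}
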
/// \returns a read-only expression of the real part of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(real,real part function)
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/IndexedViewMethods.h b/examples/ThirdPartyLibs/Eigen/src/plugins/IndexedViewMethods.h
index a7ec63adf..5bfb19ac6 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/IndexedViewMethods.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/IndexedViewMethods.h
@@ -53,11 +53,6 @@ ivcSize(const Indices& indices) const {
return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,SizeAtCompileTime>(derived().size()),Specialized);
}
-template<typename RowIndices, typename ColIndices>
-struct valid_indexed_view_overload {
- enum { value = !(internal::is_valid_index_type<RowIndices>::value && internal::is_valid_index_type<ColIndices>::value) };
-};
-
public:
#endif
@@ -72,7 +67,7 @@ struct EIGEN_INDEXED_VIEW_METHOD_TYPE {
// This is the generic version
template<typename RowIndices, typename ColIndices>
-typename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value
+typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsIndexedView,
typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type >::type
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
@@ -84,7 +79,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
// The following overload returns a Block<> object
template<typename RowIndices, typename ColIndices>
-typename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value
+typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsBlock,
typename internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::BlockType>::type
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
@@ -102,7 +97,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
// The following overload returns a Scalar
template<typename RowIndices, typename ColIndices>
-typename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value
+typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsScalar,
CoeffReturnType >::type
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
@@ -112,7 +107,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-// The folowing three overloads are needed to handle raw Index[N] arrays.
+// The following three overloads are needed to handle raw Index[N] arrays.
template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>
@@ -166,7 +161,7 @@ operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
template<typename Indices>
typename internal::enable_if<
- (internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1) && (!internal::is_valid_index_type<Indices>::value) && (!Symbolic::is_symbolic<Indices>::value),
+ (internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1) && (!internal::is_valid_index_type<Indices>::value) && (!symbolic::is_symbolic<Indices>::value),
VectorBlock<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,internal::array_size<Indices>::value> >::type
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
{
@@ -177,7 +172,7 @@ operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
}
template<typename IndexType>
-typename internal::enable_if<Symbolic::is_symbolic<IndexType>::value, CoeffReturnType >::type
+typename internal::enable_if<symbolic::is_symbolic<IndexType>::value, CoeffReturnType >::type
operator()(const IndexType& id) EIGEN_INDEXED_VIEW_METHOD_CONST
{
return Base::operator()(internal::eval_expr_given_size(id,size()));
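
The overloads patched above drive operator() for index arrays and symbolic indices (note the Symbolic -> symbolic namespace rename). A usage sketch, assuming Eigen 3.4's Eigen::last placeholder:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);

  // Raw Index[N] arrays are handled by the overloads mentioned above.
  Eigen::Index rowIdx[] = {0, 2, 4};
  Eigen::Index colIdx[] = {1, 3};
  auto view = A(rowIdx, colIdx);  // 3 x 2 IndexedView

  // A symbolic index resolves to a plain coefficient access.
  double x = A(Eigen::last, Eigen::last - 1);
  (void)view; (void)x;
  return 0;
}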
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseBinaryOps.h
index f1084abef..514d83a71 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseBinaryOps.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseBinaryOps.h
@@ -39,10 +39,10 @@ cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
-inline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>
+inline const CwiseBinaryOp<numext::equal_to<Scalar>, const Derived, const OtherDerived>
cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
- return CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+ return CwiseBinaryOp<numext::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise != operator of *this and \a other
@@ -59,10 +59,10 @@ cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
-inline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>
+inline const CwiseBinaryOp<numext::not_equal_to<Scalar>, const Derived, const OtherDerived>
cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
- return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+ return CwiseBinaryOp<numext::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise min of *this and \a other
@@ -72,23 +72,39 @@ cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
*
* \sa class CwiseBinaryOp, max()
*/
+template<int NaNPropagation, typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>
+cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,PropagateFast>, const Derived, const OtherDerived>
cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
- return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+ return cwiseMin<PropagateFast>(other);
}
/** \returns an expression of the coefficient-wise min of *this and scalar \a other
*
* \sa class CwiseBinaryOp, min()
*/
+template<int NaNPropagation>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,NaNPropagation>, const Derived, const ConstantReturnType>
+cwiseMin(const Scalar &other) const
+{
+ return cwiseMin<NaNPropagation>(Derived::Constant(rows(), cols(), other));
+}
+
EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const ConstantReturnType>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar,PropagateFast>, const Derived, const ConstantReturnType>
cwiseMin(const Scalar &other) const
{
- return cwiseMin(Derived::Constant(rows(), cols(), other));
+ return cwiseMin<PropagateFast>(Derived::Constant(rows(), cols(), other));
}
/** \returns an expression of the coefficient-wise max of *this and \a other
@@ -98,23 +114,39 @@ cwiseMin(const Scalar &other) const
*
* \sa class CwiseBinaryOp, min()
*/
+template<int NaNPropagation, typename OtherDerived>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>
+cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,NaNPropagation>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,PropagateFast>, const Derived, const OtherDerived>
cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
- return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+ return cwiseMax<PropagateFast>(other);
}
/** \returns an expression of the coefficient-wise max of *this and scalar \a other
*
* \sa class CwiseBinaryOp, min()
*/
+template<int NaNPropagation>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,NaNPropagation>, const Derived, const ConstantReturnType>
+cwiseMax(const Scalar &other) const
+{
+ return cwiseMax<NaNPropagation>(Derived::Constant(rows(), cols(), other));
+}
+
EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const ConstantReturnType>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar,PropagateFast>, const Derived, const ConstantReturnType>
cwiseMax(const Scalar &other) const
{
- return cwiseMax(Derived::Constant(rows(), cols(), other));
+ return cwiseMax<PropagateFast>(Derived::Constant(rows(), cols(), other));
}
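
The new NaNPropagation template parameter makes the NaN semantics of cwiseMin/cwiseMax explicit, with PropagateFast kept as the default for backwards compatibility. A sketch, assuming the companion PropagateNumbers/PropagateNaN constants introduced alongside this change:

#include <Eigen/Dense>
#include <limits>

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  Eigen::Vector3d a(1.0, nan, 3.0), b(2.0, 5.0, nan);

  Eigen::Vector3d m0 = a.cwiseMin(b);  // PropagateFast: unspecified on NaN
  Eigen::Vector3d m1 = a.cwiseMin<Eigen::PropagateNumbers>(b);  // NaN loses
  Eigen::Vector3d m2 = a.cwiseMax<Eigen::PropagateNaN>(b);      // NaN wins
  (void)m0; (void)m1; (void)m2;
  return 0;
}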
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseUnaryOps.h
index b1be3d566..0514d8f78 100644
--- a/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseUnaryOps.h
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -14,6 +14,7 @@
typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType;
typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType;
+typedef CwiseUnaryOp<internal::scalar_arg_op<Scalar>, const Derived> CwiseArgReturnType;
typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType;
typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType;
typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType;
@@ -82,4 +83,13 @@ EIGEN_DEVICE_FUNC
inline const CwiseInverseReturnType
cwiseInverse() const { return CwiseInverseReturnType(derived()); }
+/// \returns an expression of the coefficient-wise phase angle of \c *this
+///
+/// Example: \include MatrixBase_cwiseArg.cpp
+/// Output: \verbinclude MatrixBase_cwiseArg.out
+///
+EIGEN_DOC_UNARY_ADDONS(cwiseArg,arg)
+EIGEN_DEVICE_FUNC
+inline const CwiseArgReturnType
+cwiseArg() const { return CwiseArgReturnType(derived()); }
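
cwiseArg() yields a real-valued expression of coefficient-wise phase angles (scalar_arg_op maps a Scalar to its NumTraits<Scalar>::Real). A minimal sketch for a hypothetical complex vector z:

#include <Eigen/Dense>
#include <complex>

int main() {
  Eigen::Vector2cd z(std::complex<double>(1.0, 1.0),    // arg = pi/4
                     std::complex<double>(-1.0, 0.0));  // arg = pi
  Eigen::Vector2d angles = z.cwiseArg();
  (void)angles;
  return 0;
}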
diff --git a/examples/ThirdPartyLibs/Eigen/src/plugins/ReshapedMethods.h b/examples/ThirdPartyLibs/Eigen/src/plugins/ReshapedMethods.h
new file mode 100644
index 000000000..482a6b045
--- /dev/null
+++ b/examples/ThirdPartyLibs/Eigen/src/plugins/ReshapedMethods.h
@@ -0,0 +1,149 @@
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+
+/// \returns an expression of \c *this with reshaped sizes.
+///
+/// \param nRows the number of rows in the reshaped expression, specified at either run-time or compile-time, or AutoSize
+/// \param nCols the number of columns in the reshaped expression, specified at either run-time or compile-time, or AutoSize
+/// \tparam Order specifies whether the coefficients should be processed in column-major-order (ColMajor), in row-major-order (RowMajor),
+/// or follows the \em natural order of the nested expression (AutoOrder). The default is ColMajor.
+/// \tparam NRowsType the type of the value handling the number of rows, typically Index.
+/// \tparam NColsType the type of the value handling the number of columns, typically Index.
+///
+/// Dynamic size example: \include MatrixBase_reshaped_int_int.cpp
+/// Output: \verbinclude MatrixBase_reshaped_int_int.out
+///
+/// The number of rows \a nRows and columns \a nCols can also be specified at compile-time by passing Eigen::fix<N>,
+/// or Eigen::fix<N>(n) as arguments. In the latter case, \c n plays the role of a runtime fallback value in case \c N equals Eigen::Dynamic.
+/// Here is an example with a fixed number of rows and columns:
+/// \include MatrixBase_reshaped_fixed.cpp
+/// Output: \verbinclude MatrixBase_reshaped_fixed.out
+///
+/// Finally, one of the sizes parameter can be automatically deduced from the other one by passing AutoSize as in the following example:
+/// \include MatrixBase_reshaped_auto.cpp
+/// Output: \verbinclude MatrixBase_reshaped_auto.out
+/// AutoSize does preserve compile-time sizes when possible, i.e., when the sizes of the input are known at compile time \b and
+/// the other size is passed at compile-time using Eigen::fix<N> as above.
+///
+/// \sa class Reshaped, fix, fix<N>(int)
+///
+template<int Order = ColMajor, typename NRowsType, typename NColsType>
+EIGEN_DEVICE_FUNC
+inline Reshaped<Derived,...>
+reshaped(NRowsType nRows, NColsType nCols);
+
+/// This is the const version of reshaped(NRowsType,NColsType).
+template<int Order = ColMajor, typename NRowsType, typename NColsType>
+EIGEN_DEVICE_FUNC
+inline const Reshaped<const Derived,...>
+reshaped(NRowsType nRows, NColsType nCols) const;
+
+/// \returns an expression of \c *this with columns (or rows) stacked to a linear column vector
+///
+/// \tparam Order specifies whether the coefficients should be processed in column-major-order (ColMajor), in row-major-order (RowMajor),
+/// or follows the \em natural order of the nested expression (AutoOrder). The default is ColMajor.
+///
+/// This overload is essentially a shortcut for `A.reshaped<Order>(AutoSize,fix<1>)`.
+///
+/// - If `Order==ColMajor` (the default), then it returns a column-vector from the stacked columns of \c *this.
+/// - If `Order==RowMajor`, then it returns a column-vector from the stacked rows of \c *this.
+/// - If `Order==AutoOrder`, then it returns a column-vector with elements stacked following the storage order of \c *this.
+/// This mode is the recommended one when the particular ordering of the elements is not relevant.
+///
+/// Example:
+/// \include MatrixBase_reshaped_to_vector.cpp
+/// Output: \verbinclude MatrixBase_reshaped_to_vector.out
+///
+/// If you want more control, you can still fall back to reshaped(NRowsType,NColsType).
+///
+/// \sa reshaped(NRowsType,NColsType), class Reshaped
+///
+template<int Order = ColMajor>
+EIGEN_DEVICE_FUNC
+inline Reshaped<Derived,...>
+reshaped();
+
+/// This is the const version of reshaped().
+template<int Order = ColMajor>
+EIGEN_DEVICE_FUNC
+inline const Reshaped<const Derived,...>
+reshaped() const;
+
+#else
+
+// This file is automatically included twice to generate const and non-const versions
+
+#ifndef EIGEN_RESHAPED_METHOD_2ND_PASS
+#define EIGEN_RESHAPED_METHOD_CONST const
+#else
+#define EIGEN_RESHAPED_METHOD_CONST
+#endif
+
+#ifndef EIGEN_RESHAPED_METHOD_2ND_PASS
+
+// This part is included once
+
+#endif
+
+template<typename NRowsType, typename NColsType>
+EIGEN_DEVICE_FUNC
+inline Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived,
+ internal::get_compiletime_reshape_size<NRowsType,NColsType,SizeAtCompileTime>::value,
+ internal::get_compiletime_reshape_size<NColsType,NRowsType,SizeAtCompileTime>::value>
+reshaped(NRowsType nRows, NColsType nCols) EIGEN_RESHAPED_METHOD_CONST
+{
+ return Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived,
+ internal::get_compiletime_reshape_size<NRowsType,NColsType,SizeAtCompileTime>::value,
+ internal::get_compiletime_reshape_size<NColsType,NRowsType,SizeAtCompileTime>::value>
+ (derived(),
+ internal::get_runtime_reshape_size(nRows,internal::get_runtime_value(nCols),size()),
+ internal::get_runtime_reshape_size(nCols,internal::get_runtime_value(nRows),size()));
+}
+
+template<int Order, typename NRowsType, typename NColsType>
+EIGEN_DEVICE_FUNC
+inline Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived,
+ internal::get_compiletime_reshape_size<NRowsType,NColsType,SizeAtCompileTime>::value,
+ internal::get_compiletime_reshape_size<NColsType,NRowsType,SizeAtCompileTime>::value,
+ internal::get_compiletime_reshape_order<Flags,Order>::value>
+reshaped(NRowsType nRows, NColsType nCols) EIGEN_RESHAPED_METHOD_CONST
+{
+ return Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived,
+ internal::get_compiletime_reshape_size<NRowsType,NColsType,SizeAtCompileTime>::value,
+ internal::get_compiletime_reshape_size<NColsType,NRowsType,SizeAtCompileTime>::value,
+ internal::get_compiletime_reshape_order<Flags,Order>::value>
+ (derived(),
+ internal::get_runtime_reshape_size(nRows,internal::get_runtime_value(nCols),size()),
+ internal::get_runtime_reshape_size(nCols,internal::get_runtime_value(nRows),size()));
+}
+
+// Views as linear vectors
+
+EIGEN_DEVICE_FUNC
+inline Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived,SizeAtCompileTime,1>
+reshaped() EIGEN_RESHAPED_METHOD_CONST
+{
+ return Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived,SizeAtCompileTime,1>(derived(),size(),1);
+}
+
+template<int Order>
+EIGEN_DEVICE_FUNC
+inline Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived, SizeAtCompileTime, 1,
+ internal::get_compiletime_reshape_order<Flags,Order>::value>
+reshaped() EIGEN_RESHAPED_METHOD_CONST
+{
+ EIGEN_STATIC_ASSERT(Order==RowMajor || Order==ColMajor || Order==AutoOrder, INVALID_TEMPLATE_PARAMETER);
+ return Reshaped<EIGEN_RESHAPED_METHOD_CONST Derived, SizeAtCompileTime, 1,
+ internal::get_compiletime_reshape_order<Flags,Order>::value>
+ (derived(), size(), 1);
+}
+
+#undef EIGEN_RESHAPED_METHOD_CONST
+
+#ifndef EIGEN_RESHAPED_METHOD_2ND_PASS
+#define EIGEN_RESHAPED_METHOD_2ND_PASS
+#include "ReshapedMethods.h"
+#undef EIGEN_RESHAPED_METHOD_2ND_PASS
+#endif
+
+#endif // EIGEN_PARSED_BY_DOXYGEN
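
The file above is included twice to stamp out const and non-const overloads of the reshaped() family documented in its Doxygen block. A usage sketch of that API (matrix A hypothetical; AutoSize/AutoOrder as documented above):

#include <Eigen/Dense>

int main() {
  Eigen::Matrix<double, 2, 6> A;
  A << 1, 2, 3, 4,  5,  6,
       7, 8, 9, 10, 11, 12;

  Eigen::MatrixXd B = A.reshaped(3, 4);  // runtime sizes, ColMajor traversal

  // One size deduced via AutoSize; coefficients read in RowMajor order.
  auto C = A.reshaped<Eigen::RowMajor>(4, Eigen::AutoSize);

  // Linear view of all coefficients, following A's own storage order.
  Eigen::VectorXd v = A.reshaped<Eigen::AutoOrder>();
  (void)B; (void)C; (void)v;
  return 0;
}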
diff --git a/examples/TwoJoint/CMakeLists.txt b/examples/TwoJoint/CMakeLists.txt
index 11a51f3bb..210f5ee3b 100644
--- a/examples/TwoJoint/CMakeLists.txt
+++ b/examples/TwoJoint/CMakeLists.txt
@@ -20,8 +20,6 @@ SET(RobotSimulator_SRCS
../../examples/ExampleBrowser/InProcessExampleBrowser.cpp
../../examples/SharedMemory/plugins/tinyRendererPlugin/tinyRendererPlugin.cpp
../../examples/SharedMemory/plugins/tinyRendererPlugin/TinyRendererVisualShapeConverter.cpp
- ../../examples/OpenGLWindow/SimpleCamera.cpp
- ../../examples/OpenGLWindow/SimpleCamera.h
../../examples/TinyRenderer/geometry.cpp
../../examples/TinyRenderer/model.cpp
../../examples/TinyRenderer/tgaimage.cpp
diff --git a/examples/TwoJoint/TwoJointMain.cpp b/examples/TwoJoint/TwoJointMain.cpp
index 8625bd483..df74b0d2f 100644
--- a/examples/TwoJoint/TwoJointMain.cpp
+++ b/examples/TwoJoint/TwoJointMain.cpp
@@ -29,7 +29,12 @@ std::map<std::string, int> jointNameToId;
int main(int argc, char* argv[])
{
+#ifdef __APPLE__
+ kPhysClient = b3CreateInProcessPhysicsServerAndConnectMainThread(argc, argv);
+#else
kPhysClient = b3CreateInProcessPhysicsServerAndConnect(argc, argv);
+#endif
+
if (!kPhysClient)
return -1;
// visualizer
@@ -131,4 +136,4 @@ int main(int argc, char* argv[])
}
b3DisconnectSharedMemory(kPhysClient);
return 0;
-}
\ No newline at end of file
+}
diff --git a/examples/pybullet/gym/pybullet_data/bunny.obj b/examples/pybullet/gym/pybullet_data/bunny.obj
new file mode 100644
index 000000000..0edf6f97c
--- /dev/null
+++ b/examples/pybullet/gym/pybullet_data/bunny.obj
@@ -0,0 +1,1360 @@
+# bunny.obj
+#
+
+o bunny
+v -0.334392 0.133007 0.062259
+v -0.350189 0.150354 -0.147769
+v -0.234201 0.343811 -0.174307
+v -0.200259 0.285207 0.093749
+v 0.003520 0.475208 -0.159365
+v 0.001856 0.419203 0.098582
+v -0.252802 0.093666 0.237538
+v -0.162901 0.237984 0.206905
+v 0.000865 0.318141 0.235370
+v -0.414624 0.164083 -0.278254
+v -0.262213 0.357334 -0.293246
+v 0.004628 0.482694 -0.338626
+v -0.402162 0.133528 -0.443247
+v -0.243781 0.324275 -0.436763
+v 0.005293 0.437592 -0.458332
+v -0.339884 -0.041150 -0.668211
+v -0.248382 0.255825 -0.627493
+v 0.006261 0.376103 -0.631506
+v -0.216201 -0.126776 -0.886936
+v -0.171075 0.011544 -0.881386
+v -0.181074 0.098223 -0.814779
+v -0.119891 0.218786 -0.760153
+v -0.078895 0.276780 -0.739281
+v 0.006801 0.310959 -0.735661
+v -0.168842 0.102387 -0.920381
+v -0.104072 0.177278 -0.952530
+v -0.129704 0.211848 -0.836678
+v -0.099875 0.310931 -0.799381
+v 0.007237 0.361687 -0.794439
+v -0.077913 0.258753 -0.921640
+v 0.007957 0.282241 -0.931680
+v -0.252222 -0.550401 -0.557810
+v -0.267633 -0.603419 -0.655209
+v -0.446838 -0.118517 -0.466159
+v -0.459488 -0.093017 -0.311341
+v -0.370645 -0.100108 -0.159454
+v -0.371984 -0.091991 -0.011044
+v -0.328945 -0.098269 0.088659
+v -0.282452 -0.018862 0.311501
+v -0.352403 -0.131341 0.144902
+v -0.364126 -0.200299 0.202388
+v -0.283965 -0.231869 0.023668
+v -0.298943 -0.155218 0.369716
+v -0.293787 -0.121856 0.419097
+v -0.290163 -0.290797 0.107824
+v -0.264165 -0.272849 0.036347
+v -0.228567 -0.372573 0.290309
+v -0.190431 -0.286997 0.421917
+v -0.191039 -0.240973 0.507118
+v -0.287272 -0.276431 -0.065444
+v -0.295675 -0.280818 -0.174200
+v -0.399537 -0.313131 -0.376167
+v -0.392666 -0.488581 -0.427494
+v -0.331669 -0.570185 -0.466054
+v -0.282290 -0.618140 -0.589220
+v -0.374238 -0.594882 -0.323298
+v -0.381071 -0.629723 -0.350777
+v -0.382112 -0.624060 -0.221577
+v -0.272701 -0.566522 0.259157
+v -0.256702 -0.663406 0.286079
+v -0.280948 -0.428359 0.055790
+v -0.184974 -0.508894 0.326265
+v -0.279971 -0.526918 0.395319
+v -0.282599 -0.663393 0.412411
+v -0.188329 -0.475093 0.417954
+v -0.263384 -0.663396 0.466604
+v -0.209063 -0.663393 0.509344
+v -0.002044 -0.319624 0.553078
+v -0.001266 -0.371260 0.413296
+v -0.219753 -0.339762 -0.040921
+v -0.256986 -0.282511 -0.006349
+v -0.271706 -0.260881 0.001764
+v -0.091191 -0.419184 -0.045912
+v -0.114944 -0.429752 -0.124739
+v -0.113970 -0.382987 -0.188540
+v -0.243012 -0.464942 -0.242850
+v -0.314815 -0.505402 -0.324768
+v 0.002774 -0.437526 -0.262766
+v -0.072625 -0.417748 -0.221440
+v -0.160112 -0.476932 -0.293450
+v 0.003859 -0.453425 -0.443916
+v -0.120363 -0.581567 -0.438689
+v -0.091499 -0.584191 -0.294511
+v -0.116469 -0.599861 -0.188308
+v -0.208032 -0.513640 -0.134649
+v -0.235749 -0.610017 -0.040939
+v -0.344916 -0.622487 -0.085380
+v -0.336401 -0.531864 -0.212298
+v 0.001961 -0.459550 -0.135547
+v -0.058296 -0.430536 -0.043440
+v 0.001378 -0.449511 -0.037762
+v -0.130135 -0.510222 0.079144
+v 0.000142 -0.477549 0.157064
+v -0.114284 -0.453206 0.304397
+v -0.000592 -0.443558 0.285401
+v -0.056215 -0.663402 0.326073
+v -0.026248 -0.568010 0.273318
+v -0.049261 -0.531064 0.389854
+v -0.127096 -0.663398 0.479316
+v -0.058384 -0.663401 0.372891
+v -0.303961 0.054199 0.625921
+v -0.268594 0.193403 0.502766
+v -0.277159 0.126123 0.443289
+v -0.287605 -0.005722 0.531844
+v -0.231396 -0.121289 0.587387
+v -0.253475 -0.081797 0.756541
+v -0.195164 -0.137969 0.728011
+v -0.167673 -0.156573 0.609388
+v -0.145917 -0.169029 0.697600
+v -0.077776 -0.214247 0.622586
+v -0.076873 -0.214971 0.696301
+v -0.002341 -0.233135 0.622859
+v -0.002730 -0.213526 0.691267
+v -0.003136 -0.192628 0.762731
+v -0.056136 -0.201222 0.763806
+v -0.114589 -0.166192 0.770723
+v -0.155145 -0.129632 0.791738
+v -0.183611 -0.058705 0.847012
+v -0.165562 0.001980 0.833386
+v -0.220084 0.019914 0.768935
+v -0.255730 0.090306 0.670782
+v -0.255594 0.113833 0.663389
+v -0.226380 0.212655 0.617740
+v -0.003367 -0.195342 0.799680
+v -0.029743 -0.210508 0.827180
+v -0.003818 -0.194783 0.873636
+v -0.004116 -0.157907 0.931268
+v -0.031280 -0.184555 0.889476
+v -0.059885 -0.184448 0.841330
+v -0.135333 -0.164332 0.878200
+v -0.085574 -0.170948 0.925547
+v -0.163833 -0.094170 0.897114
+v -0.138444 -0.104250 0.945975
+v -0.083497 -0.084934 0.979607
+v -0.004433 -0.146642 0.985872
+v -0.150715 0.032650 0.884111
+v -0.135892 -0.035520 0.945455
+v -0.070612 0.036849 0.975733
+v -0.004458 -0.042526 1.015670
+v -0.004249 0.046042 1.003240
+v -0.086969 0.133224 0.947633
+v -0.003873 0.161605 0.970499
+v -0.125544 0.140012 0.917678
+v -0.125651 0.250246 0.857602
+v -0.003127 0.284070 0.878870
+v -0.159174 0.125726 0.888878
+v -0.183807 0.196970 0.844480
+v -0.159890 0.291736 0.732480
+v -0.199495 0.207230 0.779864
+v -0.206182 0.164608 0.693257
+v -0.186315 0.160689 0.817193
+v -0.192827 0.166706 0.782271
+v -0.175112 0.110008 0.860621
+v -0.161022 0.057420 0.855111
+v -0.172319 0.036155 0.816189
+v -0.190318 0.064083 0.760605
+v -0.195072 0.129179 0.731104
+v -0.203126 0.410287 0.680536
+v -0.216677 0.309274 0.642272
+v -0.241515 0.311485 0.587832
+v -0.002209 0.366663 0.749413
+v -0.088230 0.396265 0.678635
+v -0.170147 0.109517 0.840784
+v -0.160521 0.067766 0.830650
+v -0.181546 0.139805 0.812146
+v -0.180495 0.148568 0.776087
+v -0.180255 0.129125 0.744192
+v -0.186298 0.078308 0.769352
+v -0.167622 0.060539 0.806675
+v -0.189876 0.102760 0.802582
+v -0.108340 0.455446 0.657174
+v -0.241585 0.527592 0.669296
+v -0.265676 0.513366 0.634594
+v -0.203073 0.478550 0.581526
+v -0.266772 0.642330 0.602061
+v -0.216961 0.564846 0.535435
+v -0.202210 0.525495 0.475944
+v -0.193888 0.467925 0.520606
+v -0.265837 0.757267 0.500933
+v -0.240306 0.653440 0.463215
+v -0.309239 0.776868 0.304726
+v -0.271009 0.683094 0.382018
+v -0.312111 0.671099 0.286687
+v -0.268791 0.624342 0.377231
+v -0.302457 0.533996 0.360289
+v -0.263656 0.529310 0.412564
+v -0.282311 0.415167 0.447666
+v -0.239201 0.442096 0.495604
+v -0.220043 0.569026 0.445877
+v -0.001263 0.395631 0.602029
+v -0.057345 0.442535 0.572224
+v -0.088927 0.506333 0.529106
+v -0.125738 0.535076 0.612913
+v -0.126251 0.577170 0.483159
+v -0.149594 0.611520 0.557731
+v -0.163188 0.660791 0.491080
+v -0.172482 0.663387 0.415416
+v -0.160464 0.591710 0.370659
+v -0.156445 0.536396 0.378302
+v -0.136496 0.444358 0.425226
+v -0.095564 0.373768 0.473659
+v -0.104146 0.315912 0.498104
+v -0.000496 0.384194 0.473817
+v -0.000183 0.297770 0.401486
+v -0.129042 0.270145 0.434495
+v 0.000100 0.272963 0.349138
+v -0.113060 0.236984 0.385554
+v 0.007260 0.016311 -0.883396
+v 0.007865 0.122104 -0.956137
+v -0.032842 0.115282 -0.953252
+v -0.089115 0.108449 -0.950317
+v -0.047440 0.014729 -0.882756
+v -0.104458 0.013137 -0.882070
+v -0.086439 -0.584866 -0.608343
+v -0.115026 -0.662605 -0.436732
+v -0.071683 -0.665372 -0.606385
+v -0.257884 -0.665381 -0.658052
+v -0.272542 -0.665381 -0.592063
+v -0.371322 -0.665382 -0.353620
+v -0.372362 -0.665381 -0.224420
+v -0.335166 -0.665380 -0.078623
+v -0.225999 -0.665375 -0.038981
+v -0.106719 -0.665374 -0.186351
+v -0.081749 -0.665372 -0.292554
+v 0.006943 -0.091505 -0.858354
+v 0.006117 -0.280985 -0.769967
+v 0.004495 -0.502360 -0.559799
+v -0.198638 -0.302135 -0.845816
+v -0.237395 -0.542544 -0.587188
+v -0.270001 -0.279489 -0.669861
+v -0.134547 -0.119852 -0.959004
+v -0.052088 -0.122463 -0.944549
+v -0.124463 -0.293508 -0.899566
+v -0.047616 -0.289643 -0.879292
+v -0.168595 -0.529132 -0.654931
+v -0.099793 -0.515719 -0.645873
+v -0.186168 -0.605282 -0.724690
+v -0.112970 -0.583097 -0.707469
+v -0.108152 -0.665375 -0.700408
+v -0.183019 -0.665378 -0.717630
+v -0.349529 -0.334459 -0.511985
+v -0.141182 -0.437705 -0.798194
+v -0.212670 -0.448725 -0.737447
+v -0.261111 -0.414945 -0.613835
+v -0.077364 -0.431480 -0.778113
+v 0.005174 -0.425277 -0.651592
+v 0.089236 -0.431732 -0.777093
+v 0.271006 -0.415749 -0.610577
+v 0.223981 -0.449384 -0.734774
+v 0.153275 -0.438150 -0.796391
+v 0.358414 -0.335529 -0.507649
+v 0.193434 -0.665946 -0.715325
+v 0.118363 -0.665717 -0.699021
+v 0.123515 -0.583454 -0.706020
+v 0.196851 -0.605860 -0.722345
+v 0.109788 -0.516035 -0.644590
+v 0.178656 -0.529656 -0.652804
+v 0.061157 -0.289807 -0.878626
+v 0.138234 -0.293905 -0.897958
+v 0.066933 -0.122643 -0.943820
+v 0.149571 -0.120281 -0.957264
+v 0.280989 -0.280321 -0.666487
+v 0.246581 -0.543275 -0.584224
+v 0.211720 -0.302754 -0.843303
+v 0.086966 -0.665627 -0.291520
+v 0.110634 -0.665702 -0.185021
+v 0.228099 -0.666061 -0.036201
+v 0.337743 -0.666396 -0.074503
+v 0.376722 -0.666513 -0.219833
+v 0.377265 -0.666513 -0.349036
+v 0.281411 -0.666217 -0.588670
+v 0.267564 -0.666174 -0.654834
+v 0.080745 -0.665602 -0.605452
+v 0.122016 -0.662963 -0.435280
+v 0.095767 -0.585141 -0.607228
+v 0.118944 0.012799 -0.880702
+v 0.061944 0.014564 -0.882086
+v 0.104725 0.108156 -0.949130
+v 0.048513 0.115159 -0.952753
+v 0.112696 0.236643 0.386937
+v 0.128177 0.269757 0.436071
+v 0.102643 0.315600 0.499370
+v 0.094535 0.373481 0.474824
+v 0.136270 0.443946 0.426895
+v 0.157071 0.535923 0.380222
+v 0.161350 0.591224 0.372630
+v 0.173035 0.662865 0.417531
+v 0.162808 0.660299 0.493077
+v 0.148250 0.611070 0.559555
+v 0.125719 0.576790 0.484702
+v 0.123489 0.534699 0.614440
+v 0.087621 0.506066 0.530188
+v 0.055321 0.442365 0.572915
+v 0.219936 0.568361 0.448571
+v 0.238099 0.441375 0.498528
+v 0.281711 0.414315 0.451121
+v 0.263833 0.528513 0.415794
+v 0.303284 0.533081 0.363998
+v 0.269687 0.623528 0.380528
+v 0.314255 0.670153 0.290524
+v 0.272023 0.682273 0.385343
+v 0.311480 0.775931 0.308527
+v 0.240239 0.652714 0.466159
+v 0.265619 0.756464 0.504187
+v 0.192562 0.467341 0.522972
+v 0.201605 0.524885 0.478417
+v 0.215743 0.564193 0.538084
+v 0.264969 0.641527 0.605317
+v 0.201031 0.477940 0.584002
+v 0.263086 0.512567 0.637832
+v 0.238615 0.526867 0.672237
+v 0.105309 0.455123 0.658482
+v 0.183993 0.102195 0.804872
+v 0.161563 0.060042 0.808692
+v 0.180748 0.077754 0.771600
+v 0.175168 0.128588 0.746368
+v 0.175075 0.148030 0.778264
+v 0.175658 0.139265 0.814333
+v 0.154191 0.067291 0.832578
+v 0.163818 0.109013 0.842830
+v 0.084760 0.396004 0.679695
+v 0.238888 0.310760 0.590775
+v 0.213380 0.308625 0.644905
+v 0.199666 0.409678 0.683003
+v 0.190143 0.128597 0.733463
+v 0.184833 0.063516 0.762902
+v 0.166070 0.035644 0.818261
+v 0.154361 0.056943 0.857042
+v 0.168542 0.109489 0.862725
+v 0.187387 0.166131 0.784599
+v 0.180428 0.160135 0.819438
+v 0.201823 0.163991 0.695756
+v 0.194206 0.206635 0.782275
+v 0.155438 0.291260 0.734412
+v 0.177696 0.196424 0.846693
+v 0.152305 0.125256 0.890786
+v 0.119546 0.249876 0.859104
+v 0.118369 0.139643 0.919173
+v 0.079410 0.132973 0.948652
+v 0.062419 0.036648 0.976547
+v 0.127847 -0.035919 0.947070
+v 0.143624 0.032206 0.885913
+v 0.074888 -0.085173 0.980577
+v 0.130184 -0.104656 0.947620
+v 0.156201 -0.094653 0.899074
+v 0.077366 -0.171194 0.926545
+v 0.127722 -0.164729 0.879810
+v 0.052670 -0.184618 0.842019
+v 0.023477 -0.184638 0.889811
+v 0.022626 -0.210587 0.827500
+v 0.223089 0.211976 0.620493
+v 0.251444 0.113067 0.666494
+v 0.251419 0.089540 0.673887
+v 0.214360 0.019258 0.771595
+v 0.158999 0.001490 0.835374
+v 0.176696 -0.059249 0.849218
+v 0.148696 -0.130091 0.793599
+v 0.108290 -0.166528 0.772088
+v 0.049820 -0.201382 0.764454
+v 0.071341 -0.215195 0.697209
+v 0.073148 -0.214475 0.623510
+v 0.140502 -0.169461 0.699354
+v 0.163374 -0.157073 0.611416
+v 0.189466 -0.138550 0.730366
+v 0.247593 -0.082554 0.759610
+v 0.227468 -0.121982 0.590197
+v 0.284702 -0.006586 0.535347
+v 0.275741 0.125287 0.446676
+v 0.266650 0.192594 0.506044
+v 0.300086 0.053287 0.629620
+v 0.055450 -0.663935 0.375065
+v 0.122854 -0.664138 0.482323
+v 0.046520 -0.531571 0.391918
+v 0.024824 -0.568450 0.275106
+v 0.053855 -0.663931 0.328224
+v 0.112829 -0.453549 0.305788
+v 0.131265 -0.510617 0.080746
+v 0.061174 -0.430716 -0.042710
+v 0.341019 -0.532887 -0.208150
+v 0.347705 -0.623533 -0.081139
+v 0.238040 -0.610732 -0.038037
+v 0.211764 -0.514274 -0.132078
+v 0.120605 -0.600219 -0.186856
+v 0.096985 -0.584476 -0.293357
+v 0.127621 -0.581941 -0.437170
+v 0.165902 -0.477425 -0.291453
+v 0.077720 -0.417975 -0.220519
+v 0.320892 -0.506363 -0.320874
+v 0.248214 -0.465684 -0.239842
+v 0.118764 -0.383338 -0.187114
+v 0.118816 -0.430106 -0.123307
+v 0.094131 -0.419464 -0.044777
+v 0.274526 -0.261706 0.005110
+v 0.259842 -0.283292 -0.003185
+v 0.222861 -0.340431 -0.038210
+v 0.204445 -0.664380 0.513353
+v 0.259286 -0.664547 0.471281
+v 0.185402 -0.476020 0.421718
+v 0.279163 -0.664604 0.417328
+v 0.277157 -0.528122 0.400208
+v 0.183069 -0.509812 0.329995
+v 0.282599 -0.429210 0.059242
+v 0.254816 -0.664541 0.290687
+v 0.271436 -0.567707 0.263966
+v 0.386561 -0.625221 -0.216870
+v 0.387086 -0.630883 -0.346073
+v 0.380021 -0.596021 -0.318679
+v 0.291269 -0.619007 -0.585707
+v 0.339280 -0.571198 -0.461946
+v 0.400045 -0.489778 -0.422640
+v 0.406817 -0.314349 -0.371230
+v 0.300588 -0.281718 -0.170549
+v 0.290866 -0.277304 -0.061905
+v 0.187735 -0.241545 0.509437
+v 0.188032 -0.287569 0.424234
+v 0.227520 -0.373262 0.293102
+v 0.266526 -0.273650 0.039597
+v 0.291592 -0.291676 0.111386
+v 0.291914 -0.122741 0.422683
+v 0.297574 -0.156119 0.373368
+v 0.286603 -0.232731 0.027162
+v 0.364663 -0.201399 0.206850
+v 0.353855 -0.132408 0.149228
+v 0.282208 -0.019715 0.314960
+v 0.331187 -0.099266 0.092701
+v 0.375463 -0.093120 -0.006467
+v 0.375917 -0.101236 -0.154882
+v 0.466635 -0.094416 -0.305669
+v 0.455805 -0.119881 -0.460632
+v 0.277465 -0.604242 -0.651871
+v 0.261022 -0.551176 -0.554667
+v 0.093627 0.258494 -0.920589
+v 0.114248 0.310608 -0.798070
+v 0.144232 0.211434 -0.835001
+v 0.119916 0.176940 -0.951159
+v 0.184061 0.101854 -0.918220
+v 0.092431 0.276521 -0.738231
+v 0.133504 0.218403 -0.758602
+v 0.194987 0.097655 -0.812476
+v 0.185542 0.011005 -0.879202
+v 0.230315 -0.127450 -0.884202
+v 0.260471 0.255056 -0.624378
+v 0.351567 -0.042194 -0.663976
+v 0.253742 0.323524 -0.433716
+v 0.411612 0.132299 -0.438264
+v 0.270513 0.356530 -0.289984
+v 0.422146 0.162819 -0.273130
+v 0.164724 0.237490 0.208912
+v 0.253806 0.092900 0.240640
+v 0.203608 0.284597 0.096223
+v 0.241006 0.343093 -0.171396
+v 0.356076 0.149288 -0.143443
+v 0.337656 0.131992 0.066374
+f 127 135 134
+f 343 139 135
+f 134 135 139
+f 127 343 135
+f 313 317 318
+f 170 164 163
+f 313 318 320
+f 313 320 319
+f 170 163 165
+f 170 169 164
+f 313 315 316
+f 170 165 166
+f 170 168 169
+f 313 316 317
+f 313 314 315
+f 170 166 167
+f 170 167 168
+f 313 319 314
+f 309 305 306
+f 309 306 307
+f 180 182 189
+f 178 174 176
+f 178 176 177
+f 303 294 301
+f 323 295 305
+f 189 177 176
+f 189 176 180
+f 159 178 188
+f 306 294 303
+f 306 303 307
+f 323 305 309
+f 189 182 184
+f 159 174 178
+f 294 299 301
+f 305 295 297
+f 305 297 306
+f 186 177 189
+f 186 189 184
+f 188 178 177
+f 188 177 186
+f 306 297 299
+f 306 299 294
+f 437 433 29
+f 437 29 24
+f 435 279 432
+f 31 209 210
+f 31 210 30
+f 20 21 25
+f 209 208 212
+f 209 212 210
+f 20 211 213
+f 434 435 432
+f 434 432 433
+f 434 433 437
+f 437 438 434
+f 278 276 277
+f 278 277 279
+f 210 211 26
+f 22 27 25
+f 22 25 21
+f 26 27 28
+f 26 28 30
+f 436 440 278
+f 440 276 278
+f 433 432 31
+f 433 31 29
+f 434 438 439
+f 434 439 436
+f 435 278 279
+f 25 26 211
+f 25 27 26
+f 30 28 29
+f 30 29 31
+f 20 25 211
+f 209 31 432
+f 209 432 279
+f 436 435 434
+f 436 278 435
+f 26 30 210
+f 28 23 24
+f 28 24 29
+f 27 23 28
+f 27 22 23
+f 213 211 210
+f 213 210 212
+f 208 209 279
+f 208 279 277
+f 440 436 439
+f 13 10 11
+f 13 11 14
+f 3 4 6
+f 3 6 5
+f 17 14 15
+f 17 15 18
+f 23 22 17
+f 14 11 12
+f 14 12 15
+f 2 1 4
+f 2 4 3
+f 16 13 17
+f 20 19 16
+f 20 16 17
+f 20 17 21
+f 10 2 3
+f 10 3 11
+f 4 8 9
+f 4 9 6
+f 17 18 24
+f 17 24 23
+f 22 21 17
+f 11 3 5
+f 11 5 12
+f 1 7 8
+f 1 8 4
+f 13 14 17
+f 452 447 446
+f 452 446 451
+f 443 441 440
+f 443 440 439
+f 443 439 442
+f 422 421 423
+f 413 412 427
+f 413 427 426
+f 409 406 408
+f 414 68 69
+f 414 69 415
+f 392 391 413
+f 81 385 387
+f 405 407 379
+f 391 392 378
+f 391 378 89
+f 401 416 376
+f 399 397 396
+f 399 396 372
+f 399 372 371
+f 113 360 359
+f 113 359 114
+f 352 353 370
+f 126 350 349
+f 346 344 343
+f 343 341 340
+f 342 336 338
+f 329 342 328
+f 332 324 334
+f 332 323 324
+f 328 319 320
+f 328 320 329
+f 316 315 325
+f 303 301 302
+f 303 302 304
+f 321 312 293
+f 286 285 290
+f 311 308 289
+f 311 289 291
+f 322 351 282
+f 322 282 283
+f 424 449 368
+f 273 274 385
+f 273 385 275
+f 265 266 383
+f 265 383 384
+f 441 443 262
+f 441 262 264
+f 253 254 255
+f 253 255 252
+f 263 257 250
+f 263 250 249
+f 229 244 243
+f 229 32 244
+f 214 216 239
+f 214 239 238
+f 20 213 231
+f 225 226 234
+f 225 234 232
+f 218 219 57
+f 218 57 55
+f 218 217 240
+f 218 240 239
+f 218 239 216
+f 219 218 216
+f 219 216 215
+f 7 103 207
+f 187 200 201
+f 198 183 181
+f 171 172 158
+f 202 201 190
+f 171 191 192
+f 171 192 193
+f 176 175 179
+f 176 179 180
+f 169 168 156
+f 123 150 159
+f 123 159 160
+f 136 154 155
+f 136 155 119
+f 144 141 142
+f 144 142 145
+f 133 134 137
+f 131 127 134
+f 125 126 128
+f 123 102 101
+f 123 101 122
+f 111 109 108
+f 111 108 110
+f 99 100 98
+f 99 98 65
+f 99 65 67
+f 88 56 58
+f 84 83 80
+f 84 80 85
+f 79 75 51
+f 50 72 42
+f 50 42 38
+f 50 38 37
+f 59 45 61
+f 61 60 59
+f 52 35 34
+f 40 41 43
+f 40 43 39
+f 244 241 34
+f 244 34 230
+f 40 39 7
+f 45 47 41
+f 56 57 58
+f 65 63 66
+f 65 66 67
+f 42 72 46
+f 76 51 52
+f 82 80 83
+f 78 89 74
+f 94 93 95
+f 69 48 47
+f 97 98 100
+f 97 100 96
+f 111 110 112
+f 112 113 111
+f 115 114 124
+f 115 124 125
+f 133 132 130
+f 134 138 137
+f 136 143 146
+f 146 153 136
+f 150 148 158
+f 158 159 150
+f 165 151 152
+f 154 164 169
+f 154 169 155
+f 186 184 183
+f 186 183 185
+f 162 190 191
+f 201 200 192
+f 201 192 191
+f 181 179 196
+f 181 196 197
+f 103 102 205
+f 103 205 207
+f 44 49 105
+f 44 105 104
+f 217 218 55
+f 217 55 33
+f 208 225 232
+f 231 213 212
+f 231 212 232
+f 228 233 242
+f 228 242 243
+f 236 235 242
+f 236 242 245
+f 431 249 248
+f 273 275 254
+f 273 254 253
+f 440 261 276
+f 226 225 260
+f 226 260 258
+f 270 271 408
+f 270 408 406
+f 271 270 274
+f 271 274 273
+f 274 270 269
+f 274 269 268
+f 274 268 267
+f 274 267 266
+f 274 266 265
+f 449 280 368
+f 282 351 369
+f 286 287 302
+f 291 324 311
+f 291 312 324
+f 283 282 190
+f 293 312 291
+f 293 291 292
+f 308 307 303
+f 308 303 304
+f 317 316 325
+f 317 325 330
+f 332 352 351
+f 331 335 336
+f 331 336 329
+f 342 338 339
+f 345 356 355
+f 347 346 349
+f 347 349 348
+f 365 370 353
+f 365 353 354
+f 366 364 362
+f 366 362 363
+f 377 402 403
+f 374 373 398
+f 374 398 401
+f 377 93 378
+f 382 379 388
+f 382 388 386
+f 387 78 81
+f 391 390 413
+f 417 418 402
+f 404 418 416
+f 409 430 431
+f 420 424 419
+f 428 429 445
+f 428 445 447
+f 438 437 442
+f 451 446 12
+f 451 12 5
+f 448 450 6
+f 448 6 9
+f 442 439 438
+f 426 427 452
+f 426 452 453
+f 418 422 416
+f 409 408 430
+f 400 404 401
+f 400 401 398
+f 395 394 417
+f 390 412 413
+f 387 384 386
+f 409 388 379
+f 409 379 407
+f 378 392 377
+f 95 376 416
+f 373 374 375
+f 373 375 371
+f 360 112 361
+f 360 113 112
+f 114 359 350
+f 114 350 124
+f 347 344 346
+f 344 341 343
+f 339 337 145
+f 339 145 142
+f 328 342 355
+f 328 355 327
+f 332 351 322
+f 332 322 323
+f 315 314 327
+f 315 327 326
+f 301 299 300
+f 301 300 302
+f 289 288 290
+f 190 293 283
+f 288 289 304
+f 285 286 298
+f 369 281 282
+f 449 448 280
+f 275 227 256
+f 268 269 405
+f 268 405 380
+f 430 263 431
+f 440 441 261
+f 258 259 250
+f 258 250 247
+f 431 263 249
+f 235 229 243
+f 235 243 242
+f 238 239 240
+f 238 240 237
+f 16 19 228
+f 16 228 230
+f 223 224 83
+f 223 83 84
+f 215 216 214
+f 215 214 82
+f 39 103 7
+f 123 160 201
+f 123 201 202
+f 175 172 193
+f 175 193 195
+f 198 194 199
+f 191 171 162
+f 182 180 179
+f 182 179 181
+f 167 157 156
+f 164 154 153
+f 164 153 163
+f 121 157 150
+f 121 150 122
+f 153 154 136
+f 141 144 143
+f 136 132 133
+f 136 133 137
+f 131 130 129
+f 131 129 128
+f 101 106 120
+f 101 120 121
+f 107 105 108
+f 107 108 109
+f 92 96 60
+f 94 95 69
+f 92 90 93
+f 77 54 56
+f 77 56 88
+f 82 79 80
+f 75 74 50
+f 70 61 46
+f 59 63 65
+f 59 65 62
+f 54 32 33
+f 33 55 54
+f 43 44 39
+f 36 37 1
+f 36 1 2
+f 35 36 2
+f 35 2 10
+f 45 41 42
+f 45 42 46
+f 34 241 52
+f 64 63 59
+f 64 59 60
+f 46 72 71
+f 77 76 52
+f 77 52 53
+f 87 86 85
+f 87 85 88
+f 90 73 74
+f 90 74 89
+f 92 93 97
+f 92 97 96
+f 73 92 61
+f 73 61 70
+f 105 107 106
+f 120 106 118
+f 120 118 119
+f 125 128 129
+f 118 117 130
+f 118 130 132
+f 119 118 132
+f 136 141 143
+f 147 151 153
+f 147 153 146
+f 150 123 122
+f 167 166 152
+f 167 152 157
+f 159 173 174
+f 162 161 190
+f 200 199 194
+f 200 194 192
+f 205 202 203
+f 179 175 195
+f 201 160 187
+f 110 49 68
+f 49 108 105
+f 217 33 237
+f 217 237 240
+f 224 215 82
+f 224 82 83
+f 34 13 16
+f 33 229 235
+f 33 235 237
+f 241 32 53
+f 257 256 247
+f 257 247 250
+f 259 264 249
+f 259 249 250
+f 276 261 260
+f 276 260 277
+f 208 277 260
+f 271 272 430
+f 271 430 408
+f 414 419 367
+f 414 367 366
+f 369 368 280
+f 369 280 281
+f 304 302 287
+f 304 287 288
+f 284 283 293
+f 284 293 292
+f 321 293 190
+f 299 297 298
+f 299 298 300
+f 319 328 327
+f 319 327 314
+f 330 331 318
+f 337 334 321
+f 327 355 354
+f 335 333 334
+f 335 334 337
+f 343 340 140
+f 343 140 139
+f 346 343 127
+f 348 358 357
+f 370 369 352
+f 364 357 358
+f 364 358 362
+f 367 368 369
+f 367 369 370
+f 376 374 401
+f 93 91 378
+f 410 388 409
+f 387 386 388
+f 387 388 389
+f 413 395 392
+f 397 399 400
+f 409 407 406
+f 416 422 420
+f 416 420 415
+f 426 453 449
+f 426 449 425
+f 445 442 444
+f 449 453 450
+f 449 450 448
+f 447 445 444
+f 447 444 446
+f 251 248 262
+f 251 262 429
+f 422 423 424
+f 422 424 420
+f 428 411 251
+f 418 404 402
+f 404 403 402
+f 421 393 413
+f 421 413 426
+f 421 426 425
+f 387 412 390
+f 384 383 382
+f 384 382 386
+f 379 380 405
+f 373 372 396
+f 373 396 398
+f 372 373 371
+f 362 360 361
+f 362 361 363
+f 369 351 352
+f 350 348 349
+f 357 356 345
+f 357 345 347
+f 345 342 341
+f 345 341 344
+f 339 338 337
+f 329 336 342
+f 325 353 352
+f 325 352 332
+f 321 145 337
+f 315 326 325
+f 323 309 310
+f 311 310 308
+f 288 287 290
+f 204 281 280
+f 204 280 206
+f 298 296 284
+f 298 284 285
+f 448 206 280
+f 275 385 81
+f 275 81 227
+f 267 268 380
+f 267 380 381
+f 226 258 247
+f 226 247 246
+f 257 255 254
+f 257 254 256
+f 431 248 251
+f 227 236 245
+f 227 245 246
+f 233 234 245
+f 233 245 242
+f 231 19 20
+f 33 32 229
+f 220 221 87
+f 220 87 58
+f 227 214 236
+f 207 8 7
+f 123 202 102
+f 202 205 102
+f 181 197 198
+f 171 193 172
+f 201 191 190
+f 195 194 196
+f 184 182 181
+f 184 181 183
+f 156 155 169
+f 150 157 152
+f 150 152 149
+f 156 157 121
+f 146 143 144
+f 146 144 147
+f 137 138 141
+f 134 133 131
+f 129 130 117
+f 101 121 122
+f 111 113 114
+f 111 114 115
+f 67 66 64
+f 67 64 100
+f 67 100 99
+f 97 47 62
+f 90 89 91
+f 87 88 58
+f 81 79 82
+f 73 70 50
+f 68 49 48
+f 68 48 69
+f 57 56 54
+f 51 50 37
+f 51 37 36
+f 41 40 42
+f 243 244 230
+f 243 230 228
+f 7 38 40
+f 43 48 49
+f 43 49 44
+f 62 47 45
+f 46 71 70
+f 70 71 72
+f 70 72 50
+f 75 79 78
+f 84 85 86
+f 74 75 78
+f 94 97 93
+f 69 47 94
+f 96 100 64
+f 96 64 60
+f 116 109 111
+f 116 111 115
+f 126 127 128
+f 130 131 133
+f 138 134 139
+f 138 139 140
+f 149 147 144
+f 149 144 148
+f 120 119 155
+f 162 148 144
+f 166 165 152
+f 159 158 172
+f 159 172 173
+f 160 159 188
+f 160 188 187
+f 195 193 192
+f 195 192 194
+f 190 203 202
+f 183 198 185
+f 206 9 8
+f 49 110 108
+f 219 220 58
+f 219 58 57
+f 208 232 212
+f 233 231 232
+f 233 232 234
+f 54 53 32
+f 389 412 387
+f 410 431 251
+f 263 430 255
+f 263 255 257
+f 443 445 429
+f 274 265 384
+f 274 384 385
+f 430 272 252
+f 430 252 255
+f 414 366 363
+f 68 414 361
+f 283 284 296
+f 286 302 300
+f 203 282 281
+f 285 284 292
+f 285 292 290
+f 321 190 161
+f 309 307 308
+f 308 310 309
+f 320 318 331
+f 320 331 329
+f 354 353 325
+f 333 332 334
+f 341 342 339
+f 355 342 345
+f 350 359 358
+f 350 358 348
+f 365 356 357
+f 365 357 364
+f 365 366 367
+f 365 367 370
+f 375 377 403
+f 376 93 374
+f 78 390 391
+f 383 381 382
+f 390 78 387
+f 394 395 413
+f 394 413 393
+f 402 395 417
+f 416 401 404
+f 412 411 428
+f 412 428 427
+f 423 421 425
+f 248 249 264
+f 248 264 262
+f 446 444 15
+f 446 15 12
+f 450 451 5
+f 450 5 6
+f 444 442 18
+f 444 18 15
+f 437 24 18
+f 437 18 442
+f 425 449 423
+f 449 424 423
+f 415 420 419
+f 415 419 414
+f 407 405 406
+f 400 398 396
+f 400 396 397
+f 421 417 393
+f 389 411 412
+f 387 385 384
+f 391 89 78
+f 376 95 93
+f 416 415 69
+f 416 69 95
+f 371 375 403
+f 371 403 399
+f 362 358 359
+f 362 359 360
+f 126 349 127
+f 347 345 344
+f 341 339 340
+f 338 336 335
+f 338 335 337
+f 326 354 325
+f 325 332 333
+f 325 333 330
+f 324 323 310
+f 324 310 311
+f 295 296 298
+f 295 298 297
+f 290 287 286
+f 203 281 204
+f 289 308 304
+f 283 296 322
+f 68 361 112
+f 419 424 368
+f 419 368 367
+f 273 253 252
+f 273 252 272
+f 273 272 271
+f 256 254 275
+f 266 267 381
+f 266 381 383
+f 443 429 262
+f 441 264 259
+f 441 259 261
+f 410 251 411
+f 256 227 246
+f 256 246 247
+f 32 241 244
+f 237 235 236
+f 237 236 238
+f 234 226 246
+f 234 246 245
+f 221 222 86
+f 221 86 87
+f 82 214 227
+f 82 227 81
+f 8 207 206
+f 187 185 199
+f 187 199 200
+f 205 204 206
+f 205 206 207
+f 196 194 197
+f 172 175 173
+f 174 175 176
+f 174 173 175
+f 156 168 167
+f 161 162 144
+f 161 144 145
+f 120 155 156
+f 149 152 151
+f 149 151 147
+f 141 138 140
+f 141 140 142
+f 128 127 131
+f 115 125 129
+f 115 129 116
+f 118 106 107
+f 118 107 117
+f 105 106 101
+f 105 101 104
+f 60 61 92
+f 98 97 62
+f 98 62 65
+f 92 73 90
+f 88 85 80
+f 88 80 77
+f 79 81 78
+f 50 51 75
+f 61 45 46
+f 62 45 59
+f 52 51 36
+f 52 36 35
+f 40 38 42
+f 34 35 10
+f 34 10 13
+f 1 37 38
+f 1 38 7
+f 41 47 48
+f 41 48 43
+f 54 55 57
+f 66 63 64
+f 73 50 74
+f 80 79 76
+f 80 76 77
+f 53 54 77
+f 93 90 91
+f 97 94 47
+f 103 104 101
+f 103 101 102
+f 117 107 109
+f 117 109 116
+f 124 126 125
+f 117 116 129
+f 119 132 136
+f 141 136 137
+f 149 148 150
+f 121 120 156
+f 165 163 153
+f 165 153 151
+f 158 148 162
+f 158 162 171
+f 187 188 186
+f 187 186 185
+f 194 198 197
+f 203 204 205
+f 195 196 179
+f 199 185 198
+f 68 112 110
+f 39 44 104
+f 39 104 103
+f 215 224 223
+f 215 223 222
+f 215 222 221
+f 215 221 220
+f 215 220 219
+f 214 238 236
+f 222 223 84
+f 222 84 86
+f 16 230 34
+f 228 19 231
+f 228 231 233
+f 53 52 241
+f 76 79 51
+f 409 431 410
+f 261 259 258
+f 261 258 260
+f 225 208 260
+f 269 270 406
+f 269 406 405
+f 414 363 361
+f 448 9 206
+f 300 298 286
+f 190 282 203
+f 291 289 290
+f 291 290 292
+f 323 322 296
+f 323 296 295
+f 334 324 312
+f 334 312 321
+f 318 317 330
+f 321 161 145
+f 354 326 327
+f 330 333 335
+f 330 335 331
+f 340 339 142
+f 340 142 140
+f 349 346 127
+f 348 357 347
+f 124 350 126
+f 365 354 355
+f 365 355 356
+f 366 365 364
+f 377 392 395
+f 377 395 402
+f 93 377 375
+f 93 375 374
+f 378 91 89
+f 381 380 379
+f 381 379 382
+f 389 388 410
+f 389 410 411
+f 417 394 393
+f 400 399 403
+f 400 403 404
+f 251 429 428
+f 422 418 417
+f 422 417 421
+f 427 428 447
+f 427 447 452
+f 445 443 442
+f 453 452 451
+f 453 451 450
+
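
The records above follow the Wavefront OBJ convention: each "v x y z" line defines a vertex and each "f i j k" line a triangle whose 1-based indices refer back into the vertex list. As a minimal sketch only (assuming a local pybullet install and that the mesh ships under pybullet_data as bunny.obj; the variable names are illustrative), the added file can be loaded as a collision mesh like this:

    import pybullet as p
    import pybullet_data

    # Connect headless and point pybullet at its bundled data directory.
    p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())

    # GEOM_MESH accepts a Wavefront OBJ file; the 1-based "f" indices in
    # the file are resolved internally by the loader.
    col = p.createCollisionShape(p.GEOM_MESH, fileName="bunny.obj")
    body = p.createMultiBody(baseMass=1.0, baseCollisionShapeIndex=col)

    print(p.getNumBodies())  # expect 1
    p.disconnect()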