src/Eigenvalues/SelfAdjointEigenSolver.h@0:13a5d365ba16, 2016-10-13 (annotated)
- Committer: ykuroda
- Date: Thu Oct 13 04:07:23 2016 +0000
- Revision: 0:13a5d365ba16

First commit, Eigen Matrix Class Library
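The file annotated below defines Eigen's `SelfAdjointEigenSolver`. As a quick orientation before the line-by-line listing, here is a minimal usage sketch, assuming Eigen is available on the include path; the matrix values and variable names are illustrative only and are not part of this revision.

```cpp
#include <iostream>
#include <Eigen/Dense>

int main()
{
  // A real symmetric (hence selfadjoint) matrix; only its lower triangle is read.
  Eigen::Matrix3d A;
  A << 2, -1,  0,
      -1,  2, -1,
       0, -1,  2;

  // The (matrix, options) constructor computes the decomposition immediately.
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
  if (es.info() != Eigen::Success)
    return 1;  // NoConvergence would be reported here

  // Eigenvalues are real and sorted in increasing order;
  // column k of eigenvectors() pairs with eigenvalues()(k).
  std::cout << "eigenvalues:\n"  << es.eigenvalues()  << "\n";
  std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";

  // For a positive-definite A, the matrix square root is also available.
  std::cout << "operatorSqrt:\n" << es.operatorSqrt() << "\n";
  return 0;
}
```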
Who changed what in which revision?
| User | Revision | Line number | New contents of line |
|---|---|---|---|
| ykuroda | 0:13a5d365ba16 | 1 | // This file is part of Eigen, a lightweight C++ template library |
| ykuroda | 0:13a5d365ba16 | 2 | // for linear algebra. |
| ykuroda | 0:13a5d365ba16 | 3 | // |
| ykuroda | 0:13a5d365ba16 | 4 | // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> |
| ykuroda | 0:13a5d365ba16 | 5 | // Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk> |
| ykuroda | 0:13a5d365ba16 | 6 | // |
| ykuroda | 0:13a5d365ba16 | 7 | // This Source Code Form is subject to the terms of the Mozilla |
| ykuroda | 0:13a5d365ba16 | 8 | // Public License v. 2.0. If a copy of the MPL was not distributed |
| ykuroda | 0:13a5d365ba16 | 9 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. |
| ykuroda | 0:13a5d365ba16 | 10 | |
| ykuroda | 0:13a5d365ba16 | 11 | #ifndef EIGEN_SELFADJOINTEIGENSOLVER_H |
| ykuroda | 0:13a5d365ba16 | 12 | #define EIGEN_SELFADJOINTEIGENSOLVER_H |
| ykuroda | 0:13a5d365ba16 | 13 | |
| ykuroda | 0:13a5d365ba16 | 14 | #include "./Tridiagonalization.h" |
| ykuroda | 0:13a5d365ba16 | 15 | |
| ykuroda | 0:13a5d365ba16 | 16 | namespace Eigen { |
| ykuroda | 0:13a5d365ba16 | 17 | |
| ykuroda | 0:13a5d365ba16 | 18 | template<typename _MatrixType> |
| ykuroda | 0:13a5d365ba16 | 19 | class GeneralizedSelfAdjointEigenSolver; |
| ykuroda | 0:13a5d365ba16 | 20 | |
| ykuroda | 0:13a5d365ba16 | 21 | namespace internal { |
| ykuroda | 0:13a5d365ba16 | 22 | template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues; |
| ykuroda | 0:13a5d365ba16 | 23 | } |
| ykuroda | 0:13a5d365ba16 | 24 | |
| ykuroda | 0:13a5d365ba16 | 25 | /** \eigenvalues_module \ingroup Eigenvalues_Module |
| ykuroda | 0:13a5d365ba16 | 26 | * |
| ykuroda | 0:13a5d365ba16 | 27 | * |
| ykuroda | 0:13a5d365ba16 | 28 | * \class SelfAdjointEigenSolver |
| ykuroda | 0:13a5d365ba16 | 29 | * |
| ykuroda | 0:13a5d365ba16 | 30 | * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices |
| ykuroda | 0:13a5d365ba16 | 31 | * |
| ykuroda | 0:13a5d365ba16 | 32 | * \tparam _MatrixType the type of the matrix of which we are computing the |
| ykuroda | 0:13a5d365ba16 | 33 | * eigendecomposition; this is expected to be an instantiation of the Matrix |
| ykuroda | 0:13a5d365ba16 | 34 | * class template. |
| ykuroda | 0:13a5d365ba16 | 35 | * |
| ykuroda | 0:13a5d365ba16 | 36 | * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real |
| ykuroda | 0:13a5d365ba16 | 37 | * matrices, this means that the matrix is symmetric: it equals its |
| ykuroda | 0:13a5d365ba16 | 38 | * transpose. This class computes the eigenvalues and eigenvectors of a |
| ykuroda | 0:13a5d365ba16 | 39 | * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors |
| ykuroda | 0:13a5d365ba16 | 40 | * \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a |
| ykuroda | 0:13a5d365ba16 | 41 | * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with |
| ykuroda | 0:13a5d365ba16 | 42 | * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the |
| ykuroda | 0:13a5d365ba16 | 43 | * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint |
| ykuroda | 0:13a5d365ba16 | 44 | * matrices, the matrix \f$ V \f$ is always invertible). This is called the |
| ykuroda | 0:13a5d365ba16 | 45 | * eigendecomposition. |
| ykuroda | 0:13a5d365ba16 | 46 | * |
| ykuroda | 0:13a5d365ba16 | 47 | * The algorithm exploits the fact that the matrix is selfadjoint, making it |
| ykuroda | 0:13a5d365ba16 | 48 | * faster and more accurate than the general purpose eigenvalue algorithms |
| ykuroda | 0:13a5d365ba16 | 49 | * implemented in EigenSolver and ComplexEigenSolver. |
| ykuroda | 0:13a5d365ba16 | 50 | * |
| ykuroda | 0:13a5d365ba16 | 51 | * Only the \b lower \b triangular \b part of the input matrix is referenced. |
| ykuroda | 0:13a5d365ba16 | 52 | * |
| ykuroda | 0:13a5d365ba16 | 53 | * Call the function compute() to compute the eigenvalues and eigenvectors of |
| ykuroda | 0:13a5d365ba16 | 54 | * a given matrix. Alternatively, you can use the |
| ykuroda | 0:13a5d365ba16 | 55 | * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes |
| ykuroda | 0:13a5d365ba16 | 56 | * the eigenvalues and eigenvectors at construction time. Once the eigenvalue |
| ykuroda | 0:13a5d365ba16 | 57 | * and eigenvectors are computed, they can be retrieved with the eigenvalues() |
| ykuroda | 0:13a5d365ba16 | 58 | * and eigenvectors() functions. |
| ykuroda | 0:13a5d365ba16 | 59 | * |
| ykuroda | 0:13a5d365ba16 | 60 | * The documentation for SelfAdjointEigenSolver(const MatrixType&, int) |
| ykuroda | 0:13a5d365ba16 | 61 | * contains an example of the typical use of this class. |
| ykuroda | 0:13a5d365ba16 | 62 | * |
| ykuroda | 0:13a5d365ba16 | 63 | * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and |
| ykuroda | 0:13a5d365ba16 | 64 | * the likes, see the class GeneralizedSelfAdjointEigenSolver. |
| ykuroda | 0:13a5d365ba16 | 65 | * |
| ykuroda | 0:13a5d365ba16 | 66 | * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver |
| ykuroda | 0:13a5d365ba16 | 67 | */ |
| ykuroda | 0:13a5d365ba16 | 68 | template<typename _MatrixType> class SelfAdjointEigenSolver |
| ykuroda | 0:13a5d365ba16 | 69 | { |
| ykuroda | 0:13a5d365ba16 | 70 | public: |
| ykuroda | 0:13a5d365ba16 | 71 | |
| ykuroda | 0:13a5d365ba16 | 72 | typedef _MatrixType MatrixType; |
| ykuroda | 0:13a5d365ba16 | 73 | enum { |
| ykuroda | 0:13a5d365ba16 | 74 | Size = MatrixType::RowsAtCompileTime, |
| ykuroda | 0:13a5d365ba16 | 75 | ColsAtCompileTime = MatrixType::ColsAtCompileTime, |
| ykuroda | 0:13a5d365ba16 | 76 | Options = MatrixType::Options, |
| ykuroda | 0:13a5d365ba16 | 77 | MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime |
| ykuroda | 0:13a5d365ba16 | 78 | }; |
| ykuroda | 0:13a5d365ba16 | 79 | |
| ykuroda | 0:13a5d365ba16 | 80 | /** \brief Scalar type for matrices of type \p _MatrixType. */ |
| ykuroda | 0:13a5d365ba16 | 81 | typedef typename MatrixType::Scalar Scalar; |
| ykuroda | 0:13a5d365ba16 | 82 | typedef typename MatrixType::Index Index; |
| ykuroda | 0:13a5d365ba16 | 83 | |
| ykuroda | 0:13a5d365ba16 | 84 | typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType; |
| ykuroda | 0:13a5d365ba16 | 85 | |
| ykuroda | 0:13a5d365ba16 | 86 | /** \brief Real scalar type for \p _MatrixType. |
| ykuroda | 0:13a5d365ba16 | 87 | * |
| ykuroda | 0:13a5d365ba16 | 88 | * This is just \c Scalar if #Scalar is real (e.g., \c float or |
| ykuroda | 0:13a5d365ba16 | 89 | * \c double), and the type of the real part of \c Scalar if #Scalar is |
| ykuroda | 0:13a5d365ba16 | 90 | * complex. |
| ykuroda | 0:13a5d365ba16 | 91 | */ |
| ykuroda | 0:13a5d365ba16 | 92 | typedef typename NumTraits<Scalar>::Real RealScalar; |
| ykuroda | 0:13a5d365ba16 | 93 | |
| ykuroda | 0:13a5d365ba16 | 94 | friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>; |
| ykuroda | 0:13a5d365ba16 | 95 | |
| ykuroda | 0:13a5d365ba16 | 96 | /** \brief Type for vector of eigenvalues as returned by eigenvalues(). |
| ykuroda | 0:13a5d365ba16 | 97 | * |
| ykuroda | 0:13a5d365ba16 | 98 | * This is a column vector with entries of type #RealScalar. |
| ykuroda | 0:13a5d365ba16 | 99 | * The length of the vector is the size of \p _MatrixType. |
| ykuroda | 0:13a5d365ba16 | 100 | */ |
| ykuroda | 0:13a5d365ba16 | 101 | typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType; |
| ykuroda | 0:13a5d365ba16 | 102 | typedef Tridiagonalization<MatrixType> TridiagonalizationType; |
| ykuroda | 0:13a5d365ba16 | 103 | |
| ykuroda | 0:13a5d365ba16 | 104 | /** \brief Default constructor for fixed-size matrices. |
| ykuroda | 0:13a5d365ba16 | 105 | * |
| ykuroda | 0:13a5d365ba16 | 106 | * The default constructor is useful in cases in which the user intends to |
| ykuroda | 0:13a5d365ba16 | 107 | * perform decompositions via compute(). This constructor |
| ykuroda | 0:13a5d365ba16 | 108 | * can only be used if \p _MatrixType is a fixed-size matrix; use |
| ykuroda | 0:13a5d365ba16 | 109 | * SelfAdjointEigenSolver(Index) for dynamic-size matrices. |
| ykuroda | 0:13a5d365ba16 | 110 | * |
| ykuroda | 0:13a5d365ba16 | 111 | * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp |
| ykuroda | 0:13a5d365ba16 | 112 | * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out |
| ykuroda | 0:13a5d365ba16 | 113 | */ |
| ykuroda | 0:13a5d365ba16 | 114 | SelfAdjointEigenSolver() |
| ykuroda | 0:13a5d365ba16 | 115 | : m_eivec(), |
| ykuroda | 0:13a5d365ba16 | 116 | m_eivalues(), |
| ykuroda | 0:13a5d365ba16 | 117 | m_subdiag(), |
| ykuroda | 0:13a5d365ba16 | 118 | m_isInitialized(false) |
| ykuroda | 0:13a5d365ba16 | 119 | { } |
| ykuroda | 0:13a5d365ba16 | 120 | |
| ykuroda | 0:13a5d365ba16 | 121 | /** \brief Constructor, pre-allocates memory for dynamic-size matrices. |
| ykuroda | 0:13a5d365ba16 | 122 | * |
| ykuroda | 0:13a5d365ba16 | 123 | * \param [in] size Positive integer, size of the matrix whose |
| ykuroda | 0:13a5d365ba16 | 124 | * eigenvalues and eigenvectors will be computed. |
| ykuroda | 0:13a5d365ba16 | 125 | * |
| ykuroda | 0:13a5d365ba16 | 126 | * This constructor is useful for dynamic-size matrices, when the user |
| ykuroda | 0:13a5d365ba16 | 127 | * intends to perform decompositions via compute(). The \p size |
| ykuroda | 0:13a5d365ba16 | 128 | * parameter is only used as a hint. It is not an error to give a wrong |
| ykuroda | 0:13a5d365ba16 | 129 | * \p size, but it may impair performance. |
| ykuroda | 0:13a5d365ba16 | 130 | * |
| ykuroda | 0:13a5d365ba16 | 131 | * \sa compute() for an example |
| ykuroda | 0:13a5d365ba16 | 132 | */ |
| ykuroda | 0:13a5d365ba16 | 133 | SelfAdjointEigenSolver(Index size) |
| ykuroda | 0:13a5d365ba16 | 134 | : m_eivec(size, size), |
| ykuroda | 0:13a5d365ba16 | 135 | m_eivalues(size), |
| ykuroda | 0:13a5d365ba16 | 136 | m_subdiag(size > 1 ? size - 1 : 1), |
| ykuroda | 0:13a5d365ba16 | 137 | m_isInitialized(false) |
| ykuroda | 0:13a5d365ba16 | 138 | {} |
| ykuroda | 0:13a5d365ba16 | 139 | |
| ykuroda | 0:13a5d365ba16 | 140 | /** \brief Constructor; computes eigendecomposition of given matrix. |
| ykuroda | 0:13a5d365ba16 | 141 | * |
| ykuroda | 0:13a5d365ba16 | 142 | * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to |
| ykuroda | 0:13a5d365ba16 | 143 | * be computed. Only the lower triangular part of the matrix is referenced. |
| ykuroda | 0:13a5d365ba16 | 144 | * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. |
| ykuroda | 0:13a5d365ba16 | 145 | * |
| ykuroda | 0:13a5d365ba16 | 146 | * This constructor calls compute(const MatrixType&, int) to compute the |
| ykuroda | 0:13a5d365ba16 | 147 | * eigenvalues of the matrix \p matrix. The eigenvectors are computed if |
| ykuroda | 0:13a5d365ba16 | 148 | * \p options equals #ComputeEigenvectors. |
| ykuroda | 0:13a5d365ba16 | 149 | * |
| ykuroda | 0:13a5d365ba16 | 150 | * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp |
| ykuroda | 0:13a5d365ba16 | 151 | * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out |
| ykuroda | 0:13a5d365ba16 | 152 | * |
| ykuroda | 0:13a5d365ba16 | 153 | * \sa compute(const MatrixType&, int) |
| ykuroda | 0:13a5d365ba16 | 154 | */ |
| ykuroda | 0:13a5d365ba16 | 155 | SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 156 | : m_eivec(matrix.rows(), matrix.cols()), |
| ykuroda | 0:13a5d365ba16 | 157 | m_eivalues(matrix.cols()), |
| ykuroda | 0:13a5d365ba16 | 158 | m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1), |
| ykuroda | 0:13a5d365ba16 | 159 | m_isInitialized(false) |
| ykuroda | 0:13a5d365ba16 | 160 | { |
| ykuroda | 0:13a5d365ba16 | 161 | compute(matrix, options); |
| ykuroda | 0:13a5d365ba16 | 162 | } |
| ykuroda | 0:13a5d365ba16 | 163 | |
| ykuroda | 0:13a5d365ba16 | 164 | /** \brief Computes eigendecomposition of given matrix. |
| ykuroda | 0:13a5d365ba16 | 165 | * |
| ykuroda | 0:13a5d365ba16 | 166 | * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to |
| ykuroda | 0:13a5d365ba16 | 167 | * be computed. Only the lower triangular part of the matrix is referenced. |
| ykuroda | 0:13a5d365ba16 | 168 | * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. |
| ykuroda | 0:13a5d365ba16 | 169 | * \returns Reference to \c *this |
| ykuroda | 0:13a5d365ba16 | 170 | * |
| ykuroda | 0:13a5d365ba16 | 171 | * This function computes the eigenvalues of \p matrix. The eigenvalues() |
| ykuroda | 0:13a5d365ba16 | 172 | * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, |
| ykuroda | 0:13a5d365ba16 | 173 | * then the eigenvectors are also computed and can be retrieved by |
| ykuroda | 0:13a5d365ba16 | 174 | * calling eigenvectors(). |
| ykuroda | 0:13a5d365ba16 | 175 | * |
| ykuroda | 0:13a5d365ba16 | 176 | * This implementation uses a symmetric QR algorithm. The matrix is first |
| ykuroda | 0:13a5d365ba16 | 177 | * reduced to tridiagonal form using the Tridiagonalization class. The |
| ykuroda | 0:13a5d365ba16 | 178 | * tridiagonal matrix is then brought to diagonal form with implicit |
| ykuroda | 0:13a5d365ba16 | 179 | * symmetric QR steps with Wilkinson shift. Details can be found in |
| ykuroda | 0:13a5d365ba16 | 180 | * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>. |
| ykuroda | 0:13a5d365ba16 | 181 | * |
| ykuroda | 0:13a5d365ba16 | 182 | * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors |
| ykuroda | 0:13a5d365ba16 | 183 | * are required and \f$ 4n^3/3 \f$ if they are not required. |
| ykuroda | 0:13a5d365ba16 | 184 | * |
| ykuroda | 0:13a5d365ba16 | 185 | * This method reuses the memory in the SelfAdjointEigenSolver object that |
| ykuroda | 0:13a5d365ba16 | 186 | * was allocated when the object was constructed, if the size of the |
| ykuroda | 0:13a5d365ba16 | 187 | * matrix does not change. |
| ykuroda | 0:13a5d365ba16 | 188 | * |
| ykuroda | 0:13a5d365ba16 | 189 | * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp |
| ykuroda | 0:13a5d365ba16 | 190 | * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out |
| ykuroda | 0:13a5d365ba16 | 191 | * |
| ykuroda | 0:13a5d365ba16 | 192 | * \sa SelfAdjointEigenSolver(const MatrixType&, int) |
| ykuroda | 0:13a5d365ba16 | 193 | */ |
| ykuroda | 0:13a5d365ba16 | 194 | SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors); |
| ykuroda | 0:13a5d365ba16 | 195 | |
| ykuroda | 0:13a5d365ba16 | 196 | /** \brief Computes eigendecomposition of given matrix using a direct algorithm |
| ykuroda | 0:13a5d365ba16 | 197 | * |
| ykuroda | 0:13a5d365ba16 | 198 | * This is a variant of compute(const MatrixType&, int options) which |
| ykuroda | 0:13a5d365ba16 | 199 | * directly solves the underlying polynomial equation. |
| ykuroda | 0:13a5d365ba16 | 200 | * |
| ykuroda | 0:13a5d365ba16 | 201 | * Currently only 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d). |
| ykuroda | 0:13a5d365ba16 | 202 | * |
| ykuroda | 0:13a5d365ba16 | 203 | * This method is usually significantly faster than the QR algorithm |
| ykuroda | 0:13a5d365ba16 | 204 | * but it might also be less accurate. It is also worth noting that |
| ykuroda | 0:13a5d365ba16 | 205 | * for 3x3 matrices it involves trigonometric operations which are |
| ykuroda | 0:13a5d365ba16 | 206 | * not necessarily available for all scalar types. |
| ykuroda | 0:13a5d365ba16 | 207 | * |
| ykuroda | 0:13a5d365ba16 | 208 | * \sa compute(const MatrixType&, int options) |
| ykuroda | 0:13a5d365ba16 | 209 | */ |
| ykuroda | 0:13a5d365ba16 | 210 | SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors); |
| ykuroda | 0:13a5d365ba16 | 211 | |
| ykuroda | 0:13a5d365ba16 | 212 | /** \brief Returns the eigenvectors of given matrix. |
| ykuroda | 0:13a5d365ba16 | 213 | * |
| ykuroda | 0:13a5d365ba16 | 214 | * \returns A const reference to the matrix whose columns are the eigenvectors. |
| ykuroda | 0:13a5d365ba16 | 215 | * |
| ykuroda | 0:13a5d365ba16 | 216 | * \pre The eigenvectors have been computed before. |
| ykuroda | 0:13a5d365ba16 | 217 | * |
| ykuroda | 0:13a5d365ba16 | 218 | * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding |
| ykuroda | 0:13a5d365ba16 | 219 | * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The |
| ykuroda | 0:13a5d365ba16 | 220 | * eigenvectors are normalized to have (Euclidean) norm equal to one. If |
| ykuroda | 0:13a5d365ba16 | 221 | * this object was used to solve the eigenproblem for the selfadjoint |
| ykuroda | 0:13a5d365ba16 | 222 | * matrix \f$ A \f$, then the matrix returned by this function is the |
| ykuroda | 0:13a5d365ba16 | 223 | * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$. |
| ykuroda | 0:13a5d365ba16 | 224 | * |
| ykuroda | 0:13a5d365ba16 | 225 | * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp |
| ykuroda | 0:13a5d365ba16 | 226 | * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out |
| ykuroda | 0:13a5d365ba16 | 227 | * |
| ykuroda | 0:13a5d365ba16 | 228 | * \sa eigenvalues() |
| ykuroda | 0:13a5d365ba16 | 229 | */ |
| ykuroda | 0:13a5d365ba16 | 230 | const EigenvectorsType& eigenvectors() const |
| ykuroda | 0:13a5d365ba16 | 231 | { |
| ykuroda | 0:13a5d365ba16 | 232 | eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); |
| ykuroda | 0:13a5d365ba16 | 233 | eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); |
| ykuroda | 0:13a5d365ba16 | 234 | return m_eivec; |
| ykuroda | 0:13a5d365ba16 | 235 | } |
| ykuroda | 0:13a5d365ba16 | 236 | |
| ykuroda | 0:13a5d365ba16 | 237 | /** \brief Returns the eigenvalues of given matrix. |
| ykuroda | 0:13a5d365ba16 | 238 | * |
| ykuroda | 0:13a5d365ba16 | 239 | * \returns A const reference to the column vector containing the eigenvalues. |
| ykuroda | 0:13a5d365ba16 | 240 | * |
| ykuroda | 0:13a5d365ba16 | 241 | * \pre The eigenvalues have been computed before. |
| ykuroda | 0:13a5d365ba16 | 242 | * |
| ykuroda | 0:13a5d365ba16 | 243 | * The eigenvalues are repeated according to their algebraic multiplicity, |
| ykuroda | 0:13a5d365ba16 | 244 | * so there are as many eigenvalues as rows in the matrix. The eigenvalues |
| ykuroda | 0:13a5d365ba16 | 245 | * are sorted in increasing order. |
| ykuroda | 0:13a5d365ba16 | 246 | * |
| ykuroda | 0:13a5d365ba16 | 247 | * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp |
| ykuroda | 0:13a5d365ba16 | 248 | * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out |
| ykuroda | 0:13a5d365ba16 | 249 | * |
| ykuroda | 0:13a5d365ba16 | 250 | * \sa eigenvectors(), MatrixBase::eigenvalues() |
| ykuroda | 0:13a5d365ba16 | 251 | */ |
| ykuroda | 0:13a5d365ba16 | 252 | const RealVectorType& eigenvalues() const |
| ykuroda | 0:13a5d365ba16 | 253 | { |
| ykuroda | 0:13a5d365ba16 | 254 | eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); |
| ykuroda | 0:13a5d365ba16 | 255 | return m_eivalues; |
| ykuroda | 0:13a5d365ba16 | 256 | } |
| ykuroda | 0:13a5d365ba16 | 257 | |
| ykuroda | 0:13a5d365ba16 | 258 | /** \brief Computes the positive-definite square root of the matrix. |
| ykuroda | 0:13a5d365ba16 | 259 | * |
| ykuroda | 0:13a5d365ba16 | 260 | * \returns the positive-definite square root of the matrix |
| ykuroda | 0:13a5d365ba16 | 261 | * |
| ykuroda | 0:13a5d365ba16 | 262 | * \pre The eigenvalues and eigenvectors of a positive-definite matrix |
| ykuroda | 0:13a5d365ba16 | 263 | * have been computed before. |
| ykuroda | 0:13a5d365ba16 | 264 | * |
| ykuroda | 0:13a5d365ba16 | 265 | * The square root of a positive-definite matrix \f$ A \f$ is the |
| ykuroda | 0:13a5d365ba16 | 266 | * positive-definite matrix whose square equals \f$ A \f$. This function |
| ykuroda | 0:13a5d365ba16 | 267 | * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the |
| ykuroda | 0:13a5d365ba16 | 268 | * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$. |
| ykuroda | 0:13a5d365ba16 | 269 | * |
| ykuroda | 0:13a5d365ba16 | 270 | * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp |
| ykuroda | 0:13a5d365ba16 | 271 | * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out |
| ykuroda | 0:13a5d365ba16 | 272 | * |
| ykuroda | 0:13a5d365ba16 | 273 | * \sa operatorInverseSqrt(), |
| ykuroda | 0:13a5d365ba16 | 274 | * \ref MatrixFunctions_Module "MatrixFunctions Module" |
| ykuroda | 0:13a5d365ba16 | 275 | */ |
| ykuroda | 0:13a5d365ba16 | 276 | MatrixType operatorSqrt() const |
| ykuroda | 0:13a5d365ba16 | 277 | { |
| ykuroda | 0:13a5d365ba16 | 278 | eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); |
| ykuroda | 0:13a5d365ba16 | 279 | eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); |
| ykuroda | 0:13a5d365ba16 | 280 | return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint(); |
| ykuroda | 0:13a5d365ba16 | 281 | } |
| ykuroda | 0:13a5d365ba16 | 282 | |
| ykuroda | 0:13a5d365ba16 | 283 | /** \brief Computes the inverse square root of the matrix. |
| ykuroda | 0:13a5d365ba16 | 284 | * |
| ykuroda | 0:13a5d365ba16 | 285 | * \returns the inverse positive-definite square root of the matrix |
| ykuroda | 0:13a5d365ba16 | 286 | * |
| ykuroda | 0:13a5d365ba16 | 287 | * \pre The eigenvalues and eigenvectors of a positive-definite matrix |
| ykuroda | 0:13a5d365ba16 | 288 | * have been computed before. |
| ykuroda | 0:13a5d365ba16 | 289 | * |
| ykuroda | 0:13a5d365ba16 | 290 | * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to |
| ykuroda | 0:13a5d365ba16 | 291 | * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is |
| ykuroda | 0:13a5d365ba16 | 292 | * cheaper than first computing the square root with operatorSqrt() and |
| ykuroda | 0:13a5d365ba16 | 293 | * then its inverse with MatrixBase::inverse(). |
| ykuroda | 0:13a5d365ba16 | 294 | * |
| ykuroda | 0:13a5d365ba16 | 295 | * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp |
| ykuroda | 0:13a5d365ba16 | 296 | * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out |
| ykuroda | 0:13a5d365ba16 | 297 | * |
| ykuroda | 0:13a5d365ba16 | 298 | * \sa operatorSqrt(), MatrixBase::inverse(), |
| ykuroda | 0:13a5d365ba16 | 299 | * \ref MatrixFunctions_Module "MatrixFunctions Module" |
| ykuroda | 0:13a5d365ba16 | 300 | */ |
| ykuroda | 0:13a5d365ba16 | 301 | MatrixType operatorInverseSqrt() const |
| ykuroda | 0:13a5d365ba16 | 302 | { |
| ykuroda | 0:13a5d365ba16 | 303 | eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); |
| ykuroda | 0:13a5d365ba16 | 304 | eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); |
| ykuroda | 0:13a5d365ba16 | 305 | return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint(); |
| ykuroda | 0:13a5d365ba16 | 306 | } |
| ykuroda | 0:13a5d365ba16 | 307 | |
| ykuroda | 0:13a5d365ba16 | 308 | /** \brief Reports whether previous computation was successful. |
| ykuroda | 0:13a5d365ba16 | 309 | * |
| ykuroda | 0:13a5d365ba16 | 310 | * \returns \c Success if computation was successful, \c NoConvergence otherwise. |
| ykuroda | 0:13a5d365ba16 | 311 | */ |
| ykuroda | 0:13a5d365ba16 | 312 | ComputationInfo info() const |
| ykuroda | 0:13a5d365ba16 | 313 | { |
| ykuroda | 0:13a5d365ba16 | 314 | eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); |
| ykuroda | 0:13a5d365ba16 | 315 | return m_info; |
| ykuroda | 0:13a5d365ba16 | 316 | } |
| ykuroda | 0:13a5d365ba16 | 317 | |
| ykuroda | 0:13a5d365ba16 | 318 | /** \brief Maximum number of iterations. |
| ykuroda | 0:13a5d365ba16 | 319 | * |
| ykuroda | 0:13a5d365ba16 | 320 | * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n |
| ykuroda | 0:13a5d365ba16 | 321 | * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK). |
| ykuroda | 0:13a5d365ba16 | 322 | */ |
| ykuroda | 0:13a5d365ba16 | 323 | static const int m_maxIterations = 30; |
| ykuroda | 0:13a5d365ba16 | 324 | |
| ykuroda | 0:13a5d365ba16 | 325 | #ifdef EIGEN2_SUPPORT |
| ykuroda | 0:13a5d365ba16 | 326 | SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 327 | : m_eivec(matrix.rows(), matrix.cols()), |
| ykuroda | 0:13a5d365ba16 | 328 | m_eivalues(matrix.cols()), |
| ykuroda | 0:13a5d365ba16 | 329 | m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1), |
| ykuroda | 0:13a5d365ba16 | 330 | m_isInitialized(false) |
| ykuroda | 0:13a5d365ba16 | 331 | { |
| ykuroda | 0:13a5d365ba16 | 332 | compute(matrix, computeEigenvectors); |
| ykuroda | 0:13a5d365ba16 | 333 | } |
| ykuroda | 0:13a5d365ba16 | 334 | |
| ykuroda | 0:13a5d365ba16 | 335 | SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) |
| ykuroda | 0:13a5d365ba16 | 336 | : m_eivec(matA.cols(), matA.cols()), |
| ykuroda | 0:13a5d365ba16 | 337 | m_eivalues(matA.cols()), |
| ykuroda | 0:13a5d365ba16 | 338 | m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1), |
| ykuroda | 0:13a5d365ba16 | 339 | m_isInitialized(false) |
| ykuroda | 0:13a5d365ba16 | 340 | { |
| ykuroda | 0:13a5d365ba16 | 341 | static_cast<GeneralizedSelfAdjointEigenSolver<MatrixType>*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); |
| ykuroda | 0:13a5d365ba16 | 342 | } |
| ykuroda | 0:13a5d365ba16 | 343 | |
| ykuroda | 0:13a5d365ba16 | 344 | void compute(const MatrixType& matrix, bool computeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 345 | { |
| ykuroda | 0:13a5d365ba16 | 346 | compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); |
| ykuroda | 0:13a5d365ba16 | 347 | } |
| ykuroda | 0:13a5d365ba16 | 348 | |
| ykuroda | 0:13a5d365ba16 | 349 | void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) |
| ykuroda | 0:13a5d365ba16 | 350 | { |
| ykuroda | 0:13a5d365ba16 | 351 | compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); |
| ykuroda | 0:13a5d365ba16 | 352 | } |
| ykuroda | 0:13a5d365ba16 | 353 | #endif // EIGEN2_SUPPORT |
| ykuroda | 0:13a5d365ba16 | 354 | |
| ykuroda | 0:13a5d365ba16 | 355 | protected: |
| ykuroda | 0:13a5d365ba16 | 356 | static void check_template_parameters() |
| ykuroda | 0:13a5d365ba16 | 357 | { |
| ykuroda | 0:13a5d365ba16 | 358 | EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); |
| ykuroda | 0:13a5d365ba16 | 359 | } |
| ykuroda | 0:13a5d365ba16 | 360 | |
| ykuroda | 0:13a5d365ba16 | 361 | EigenvectorsType m_eivec; |
| ykuroda | 0:13a5d365ba16 | 362 | RealVectorType m_eivalues; |
| ykuroda | 0:13a5d365ba16 | 363 | typename TridiagonalizationType::SubDiagonalType m_subdiag; |
| ykuroda | 0:13a5d365ba16 | 364 | ComputationInfo m_info; |
| ykuroda | 0:13a5d365ba16 | 365 | bool m_isInitialized; |
| ykuroda | 0:13a5d365ba16 | 366 | bool m_eigenvectorsOk; |
| ykuroda | 0:13a5d365ba16 | 367 | }; |
| ykuroda | 0:13a5d365ba16 | 368 | |
| ykuroda | 0:13a5d365ba16 | 369 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 370 | * |
| ykuroda | 0:13a5d365ba16 | 371 | * \eigenvalues_module \ingroup Eigenvalues_Module |
| ykuroda | 0:13a5d365ba16 | 372 | * |
| ykuroda | 0:13a5d365ba16 | 373 | * Performs a QR step on a tridiagonal symmetric matrix represented as a |
| ykuroda | 0:13a5d365ba16 | 374 | * pair of two vectors \a diag and \a subdiag. |
| ykuroda | 0:13a5d365ba16 | 375 | * |
| ykuroda | 0:13a5d365ba16 | 376 | * \param matA the input selfadjoint matrix |
| ykuroda | 0:13a5d365ba16 | 377 | * \param hCoeffs returned Householder coefficients |
| ykuroda | 0:13a5d365ba16 | 378 | * |
| ykuroda | 0:13a5d365ba16 | 379 | * For compilation efficiency reasons, this procedure does not use Eigen expressions |
| ykuroda | 0:13a5d365ba16 | 380 | * for its arguments. |
| ykuroda | 0:13a5d365ba16 | 381 | * |
| ykuroda | 0:13a5d365ba16 | 382 | * Implemented from Golub's "Matrix Computations", algorithm 8.3.2: |
| ykuroda | 0:13a5d365ba16 | 383 | * "implicit symmetric QR step with Wilkinson shift" |
| ykuroda | 0:13a5d365ba16 | 384 | */ |
| ykuroda | 0:13a5d365ba16 | 385 | namespace internal { |
| ykuroda | 0:13a5d365ba16 | 386 | template<typename RealScalar, typename Scalar, typename Index> |
| ykuroda | 0:13a5d365ba16 | 387 | static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); |
| ykuroda | 0:13a5d365ba16 | 388 | } |
| ykuroda | 0:13a5d365ba16 | 389 | |
| ykuroda | 0:13a5d365ba16 | 390 | template<typename MatrixType> |
| ykuroda | 0:13a5d365ba16 | 391 | SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType> |
| ykuroda | 0:13a5d365ba16 | 392 | ::compute(const MatrixType& matrix, int options) |
| ykuroda | 0:13a5d365ba16 | 393 | { |
| ykuroda | 0:13a5d365ba16 | 394 | check_template_parameters(); |
| ykuroda | 0:13a5d365ba16 | 395 | |
| ykuroda | 0:13a5d365ba16 | 396 | using std::abs; |
| ykuroda | 0:13a5d365ba16 | 397 | eigen_assert(matrix.cols() == matrix.rows()); |
| ykuroda | 0:13a5d365ba16 | 398 | eigen_assert((options&~(EigVecMask|GenEigMask))==0 |
| ykuroda | 0:13a5d365ba16 | 399 | && (options&EigVecMask)!=EigVecMask |
| ykuroda | 0:13a5d365ba16 | 400 | && "invalid option parameter"); |
| ykuroda | 0:13a5d365ba16 | 401 | bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 402 | Index n = matrix.cols(); |
| ykuroda | 0:13a5d365ba16 | 403 | m_eivalues.resize(n,1); |
| ykuroda | 0:13a5d365ba16 | 404 | |
| ykuroda | 0:13a5d365ba16 | 405 | if(n==1) |
| ykuroda | 0:13a5d365ba16 | 406 | { |
| ykuroda | 0:13a5d365ba16 | 407 | m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); |
| ykuroda | 0:13a5d365ba16 | 408 | if(computeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 409 | m_eivec.setOnes(n,n); |
| ykuroda | 0:13a5d365ba16 | 410 | m_info = Success; |
| ykuroda | 0:13a5d365ba16 | 411 | m_isInitialized = true; |
| ykuroda | 0:13a5d365ba16 | 412 | m_eigenvectorsOk = computeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 413 | return *this; |
| ykuroda | 0:13a5d365ba16 | 414 | } |
| ykuroda | 0:13a5d365ba16 | 415 | |
| ykuroda | 0:13a5d365ba16 | 416 | // declare some aliases |
| ykuroda | 0:13a5d365ba16 | 417 | RealVectorType& diag = m_eivalues; |
| ykuroda | 0:13a5d365ba16 | 418 | EigenvectorsType& mat = m_eivec; |
| ykuroda | 0:13a5d365ba16 | 419 | |
| ykuroda | 0:13a5d365ba16 | 420 | // map the matrix coefficients to [-1:1] to avoid over- and underflow. |
| ykuroda | 0:13a5d365ba16 | 421 | mat = matrix.template triangularView<Lower>(); |
| ykuroda | 0:13a5d365ba16 | 422 | RealScalar scale = mat.cwiseAbs().maxCoeff(); |
| ykuroda | 0:13a5d365ba16 | 423 | if(scale==RealScalar(0)) scale = RealScalar(1); |
| ykuroda | 0:13a5d365ba16 | 424 | mat.template triangularView<Lower>() /= scale; |
| ykuroda | 0:13a5d365ba16 | 425 | m_subdiag.resize(n-1); |
| ykuroda | 0:13a5d365ba16 | 426 | internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors); |
| ykuroda | 0:13a5d365ba16 | 427 | |
| ykuroda | 0:13a5d365ba16 | 428 | Index end = n-1; |
| ykuroda | 0:13a5d365ba16 | 429 | Index start = 0; |
| ykuroda | 0:13a5d365ba16 | 430 | Index iter = 0; // total number of iterations |
| ykuroda | 0:13a5d365ba16 | 431 | |
| ykuroda | 0:13a5d365ba16 | 432 | while (end>0) |
| ykuroda | 0:13a5d365ba16 | 433 | { |
| ykuroda | 0:13a5d365ba16 | 434 | for (Index i = start; i<end; ++i) |
| ykuroda | 0:13a5d365ba16 | 435 | if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1])))) |
| ykuroda | 0:13a5d365ba16 | 436 | m_subdiag[i] = 0; |
| ykuroda | 0:13a5d365ba16 | 437 | |
| ykuroda | 0:13a5d365ba16 | 438 | // find the largest unreduced block |
| ykuroda | 0:13a5d365ba16 | 439 | while (end>0 && m_subdiag[end-1]==0) |
| ykuroda | 0:13a5d365ba16 | 440 | { |
| ykuroda | 0:13a5d365ba16 | 441 | end--; |
| ykuroda | 0:13a5d365ba16 | 442 | } |
| ykuroda | 0:13a5d365ba16 | 443 | if (end<=0) |
| ykuroda | 0:13a5d365ba16 | 444 | break; |
| ykuroda | 0:13a5d365ba16 | 445 | |
| ykuroda | 0:13a5d365ba16 | 446 | // if we spent too many iterations, we give up |
| ykuroda | 0:13a5d365ba16 | 447 | iter++; |
| ykuroda | 0:13a5d365ba16 | 448 | if(iter > m_maxIterations * n) break; |
| ykuroda | 0:13a5d365ba16 | 449 | |
| ykuroda | 0:13a5d365ba16 | 450 | start = end - 1; |
| ykuroda | 0:13a5d365ba16 | 451 | while (start>0 && m_subdiag[start-1]!=0) |
| ykuroda | 0:13a5d365ba16 | 452 | start--; |
| ykuroda | 0:13a5d365ba16 | 453 | |
| ykuroda | 0:13a5d365ba16 | 454 | internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); |
| ykuroda | 0:13a5d365ba16 | 455 | } |
| ykuroda | 0:13a5d365ba16 | 456 | |
| ykuroda | 0:13a5d365ba16 | 457 | if (iter <= m_maxIterations * n) |
| ykuroda | 0:13a5d365ba16 | 458 | m_info = Success; |
| ykuroda | 0:13a5d365ba16 | 459 | else |
| ykuroda | 0:13a5d365ba16 | 460 | m_info = NoConvergence; |
| ykuroda | 0:13a5d365ba16 | 461 | |
| ykuroda | 0:13a5d365ba16 | 462 | // Sort eigenvalues and corresponding vectors. |
| ykuroda | 0:13a5d365ba16 | 463 | // TODO make the sort optional ? |
| ykuroda | 0:13a5d365ba16 | 464 | // TODO use a better sort algorithm !! |
| ykuroda | 0:13a5d365ba16 | 465 | if (m_info == Success) |
| ykuroda | 0:13a5d365ba16 | 466 | { |
| ykuroda | 0:13a5d365ba16 | 467 | for (Index i = 0; i < n-1; ++i) |
| ykuroda | 0:13a5d365ba16 | 468 | { |
| ykuroda | 0:13a5d365ba16 | 469 | Index k; |
| ykuroda | 0:13a5d365ba16 | 470 | m_eivalues.segment(i,n-i).minCoeff(&k); |
| ykuroda | 0:13a5d365ba16 | 471 | if (k > 0) |
| ykuroda | 0:13a5d365ba16 | 472 | { |
| ykuroda | 0:13a5d365ba16 | 473 | std::swap(m_eivalues[i], m_eivalues[k+i]); |
| ykuroda | 0:13a5d365ba16 | 474 | if(computeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 475 | m_eivec.col(i).swap(m_eivec.col(k+i)); |
| ykuroda | 0:13a5d365ba16 | 476 | } |
| ykuroda | 0:13a5d365ba16 | 477 | } |
| ykuroda | 0:13a5d365ba16 | 478 | } |
| ykuroda | 0:13a5d365ba16 | 479 | |
| ykuroda | 0:13a5d365ba16 | 480 | // scale back the eigen values |
| ykuroda | 0:13a5d365ba16 | 481 | m_eivalues *= scale; |
| ykuroda | 0:13a5d365ba16 | 482 | |
| ykuroda | 0:13a5d365ba16 | 483 | m_isInitialized = true; |
| ykuroda | 0:13a5d365ba16 | 484 | m_eigenvectorsOk = computeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 485 | return *this; |
| ykuroda | 0:13a5d365ba16 | 486 | } |
| ykuroda | 0:13a5d365ba16 | 487 | |
| ykuroda | 0:13a5d365ba16 | 488 | |
| ykuroda | 0:13a5d365ba16 | 489 | namespace internal { |
| ykuroda | 0:13a5d365ba16 | 490 | |
| ykuroda | 0:13a5d365ba16 | 491 | template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues |
| ykuroda | 0:13a5d365ba16 | 492 | { |
| ykuroda | 0:13a5d365ba16 | 493 | static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options) |
| ykuroda | 0:13a5d365ba16 | 494 | { eig.compute(A,options); } |
| ykuroda | 0:13a5d365ba16 | 495 | }; |
| ykuroda | 0:13a5d365ba16 | 496 | |
| ykuroda | 0:13a5d365ba16 | 497 | template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false> |
| ykuroda | 0:13a5d365ba16 | 498 | { |
| ykuroda | 0:13a5d365ba16 | 499 | typedef typename SolverType::MatrixType MatrixType; |
| ykuroda | 0:13a5d365ba16 | 500 | typedef typename SolverType::RealVectorType VectorType; |
| ykuroda | 0:13a5d365ba16 | 501 | typedef typename SolverType::Scalar Scalar; |
| ykuroda | 0:13a5d365ba16 | 502 | typedef typename MatrixType::Index Index; |
| ykuroda | 0:13a5d365ba16 | 503 | typedef typename SolverType::EigenvectorsType EigenvectorsType; |
| ykuroda | 0:13a5d365ba16 | 504 | |
| ykuroda | 0:13a5d365ba16 | 505 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 506 | * Computes the roots of the characteristic polynomial of \a m. |
| ykuroda | 0:13a5d365ba16 | 507 | * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized. |
| ykuroda | 0:13a5d365ba16 | 508 | */ |
| ykuroda | 0:13a5d365ba16 | 509 | static inline void computeRoots(const MatrixType& m, VectorType& roots) |
| ykuroda | 0:13a5d365ba16 | 510 | { |
| ykuroda | 0:13a5d365ba16 | 511 | using std::sqrt; |
| ykuroda | 0:13a5d365ba16 | 512 | using std::atan2; |
| ykuroda | 0:13a5d365ba16 | 513 | using std::cos; |
| ykuroda | 0:13a5d365ba16 | 514 | using std::sin; |
| ykuroda | 0:13a5d365ba16 | 515 | const Scalar s_inv3 = Scalar(1.0)/Scalar(3.0); |
| ykuroda | 0:13a5d365ba16 | 516 | const Scalar s_sqrt3 = sqrt(Scalar(3.0)); |
| ykuroda | 0:13a5d365ba16 | 517 | |
| ykuroda | 0:13a5d365ba16 | 518 | // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The |
| ykuroda | 0:13a5d365ba16 | 519 | // eigenvalues are the roots to this equation, all guaranteed to be |
| ykuroda | 0:13a5d365ba16 | 520 | // real-valued, because the matrix is symmetric. |
| ykuroda | 0:13a5d365ba16 | 521 | Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0); |
| ykuroda | 0:13a5d365ba16 | 522 | Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1); |
| ykuroda | 0:13a5d365ba16 | 523 | Scalar c2 = m(0,0) + m(1,1) + m(2,2); |
| ykuroda | 0:13a5d365ba16 | 524 | |
| ykuroda | 0:13a5d365ba16 | 525 | // Construct the parameters used in classifying the roots of the equation |
| ykuroda | 0:13a5d365ba16 | 526 | // and in solving the equation for the roots in closed form. |
| ykuroda | 0:13a5d365ba16 | 527 | Scalar c2_over_3 = c2*s_inv3; |
| ykuroda | 0:13a5d365ba16 | 528 | Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3; |
| ykuroda | 0:13a5d365ba16 | 529 | if(a_over_3<Scalar(0)) |
| ykuroda | 0:13a5d365ba16 | 530 | a_over_3 = Scalar(0); |
| ykuroda | 0:13a5d365ba16 | 531 | |
| ykuroda | 0:13a5d365ba16 | 532 | Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1)); |
| ykuroda | 0:13a5d365ba16 | 533 | |
| ykuroda | 0:13a5d365ba16 | 534 | Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b; |
| ykuroda | 0:13a5d365ba16 | 535 | if(q<Scalar(0)) |
| ykuroda | 0:13a5d365ba16 | 536 | q = Scalar(0); |
| ykuroda | 0:13a5d365ba16 | 537 | |
| ykuroda | 0:13a5d365ba16 | 538 | // Compute the eigenvalues by solving for the roots of the polynomial. |
| ykuroda | 0:13a5d365ba16 | 539 | Scalar rho = sqrt(a_over_3); |
| ykuroda | 0:13a5d365ba16 | 540 | Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3] |
| ykuroda | 0:13a5d365ba16 | 541 | Scalar cos_theta = cos(theta); |
| ykuroda | 0:13a5d365ba16 | 542 | Scalar sin_theta = sin(theta); |
| ykuroda | 0:13a5d365ba16 | 543 | // roots are already sorted, since cos is monotonically decreasing on [0, pi] |
| ykuroda | 0:13a5d365ba16 | 544 | roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3) |
| ykuroda | 0:13a5d365ba16 | 545 | roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3) |
| ykuroda | 0:13a5d365ba16 | 546 | roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta; |
| ykuroda | 0:13a5d365ba16 | 547 | } |
| ykuroda | 0:13a5d365ba16 | 548 | |
| ykuroda | 0:13a5d365ba16 | 549 | static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative) |
| ykuroda | 0:13a5d365ba16 | 550 | { |
| ykuroda | 0:13a5d365ba16 | 551 | using std::abs; |
| ykuroda | 0:13a5d365ba16 | 552 | Index i0; |
| ykuroda | 0:13a5d365ba16 | 553 | // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal): |
| ykuroda | 0:13a5d365ba16 | 554 | mat.diagonal().cwiseAbs().maxCoeff(&i0); |
| ykuroda | 0:13a5d365ba16 | 555 | // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector, |
| ykuroda | 0:13a5d365ba16 | 556 | // so let's save it: |
| ykuroda | 0:13a5d365ba16 | 557 | representative = mat.col(i0); |
| ykuroda | 0:13a5d365ba16 | 558 | Scalar n0, n1; |
| ykuroda | 0:13a5d365ba16 | 559 | VectorType c0, c1; |
| ykuroda | 0:13a5d365ba16 | 560 | n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm(); |
| ykuroda | 0:13a5d365ba16 | 561 | n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm(); |
| ykuroda | 0:13a5d365ba16 | 562 | if(n0>n1) res = c0/std::sqrt(n0); |
| ykuroda | 0:13a5d365ba16 | 563 | else res = c1/std::sqrt(n1); |
| ykuroda | 0:13a5d365ba16 | 564 | |
| ykuroda | 0:13a5d365ba16 | 565 | return true; |
| ykuroda | 0:13a5d365ba16 | 566 | } |
| ykuroda | 0:13a5d365ba16 | 567 | |
| ykuroda | 0:13a5d365ba16 | 568 | static inline void run(SolverType& solver, const MatrixType& mat, int options) |
| ykuroda | 0:13a5d365ba16 | 569 | { |
| ykuroda | 0:13a5d365ba16 | 570 | eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows()); |
| ykuroda | 0:13a5d365ba16 | 571 | eigen_assert((options&~(EigVecMask|GenEigMask))==0 |
| ykuroda | 0:13a5d365ba16 | 572 | && (options&EigVecMask)!=EigVecMask |
| ykuroda | 0:13a5d365ba16 | 573 | && "invalid option parameter"); |
| ykuroda | 0:13a5d365ba16 | 574 | bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 575 | |
| ykuroda | 0:13a5d365ba16 | 576 | EigenvectorsType& eivecs = solver.m_eivec; |
| ykuroda | 0:13a5d365ba16 | 577 | VectorType& eivals = solver.m_eivalues; |
| ykuroda | 0:13a5d365ba16 | 578 | |
| ykuroda | 0:13a5d365ba16 | 579 | // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow. |
| ykuroda | 0:13a5d365ba16 | 580 | Scalar shift = mat.trace() / Scalar(3); |
| ykuroda | 0:13a5d365ba16 | 581 | // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later |
| ykuroda | 0:13a5d365ba16 | 582 | MatrixType scaledMat = mat.template selfadjointView<Lower>(); |
| ykuroda | 0:13a5d365ba16 | 583 | scaledMat.diagonal().array() -= shift; |
| ykuroda | 0:13a5d365ba16 | 584 | Scalar scale = scaledMat.cwiseAbs().maxCoeff(); |
| ykuroda | 0:13a5d365ba16 | 585 | if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations |
| ykuroda | 0:13a5d365ba16 | 586 | |
| ykuroda | 0:13a5d365ba16 | 587 | // compute the eigenvalues |
| ykuroda | 0:13a5d365ba16 | 588 | computeRoots(scaledMat,eivals); |
| ykuroda | 0:13a5d365ba16 | 589 | |
| ykuroda | 0:13a5d365ba16 | 590 | // compute the eigenvectors |
| ykuroda | 0:13a5d365ba16 | 591 | if(computeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 592 | { |
| ykuroda | 0:13a5d365ba16 | 593 | if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon()) |
| ykuroda | 0:13a5d365ba16 | 594 | { |
| ykuroda | 0:13a5d365ba16 | 595 | // All three eigenvalues are numerically the same |
| ykuroda | 0:13a5d365ba16 | 596 | eivecs.setIdentity(); |
| ykuroda | 0:13a5d365ba16 | 597 | } |
| ykuroda | 0:13a5d365ba16 | 598 | else |
| ykuroda | 0:13a5d365ba16 | 599 | { |
| ykuroda | 0:13a5d365ba16 | 600 | MatrixType tmp; |
| ykuroda | 0:13a5d365ba16 | 601 | tmp = scaledMat; |
| ykuroda | 0:13a5d365ba16 | 602 | |
| ykuroda | 0:13a5d365ba16 | 603 | // Compute the eigenvector of the most distinct eigenvalue |
| ykuroda | 0:13a5d365ba16 | 604 | Scalar d0 = eivals(2) - eivals(1); |
| ykuroda | 0:13a5d365ba16 | 605 | Scalar d1 = eivals(1) - eivals(0); |
| ykuroda | 0:13a5d365ba16 | 606 | Index k(0), l(2); |
| ykuroda | 0:13a5d365ba16 | 607 | if(d0 > d1) |
| ykuroda | 0:13a5d365ba16 | 608 | { |
| ykuroda | 0:13a5d365ba16 | 609 | std::swap(k,l); |
| ykuroda | 0:13a5d365ba16 | 610 | d0 = d1; |
| ykuroda | 0:13a5d365ba16 | 611 | } |
| ykuroda | 0:13a5d365ba16 | 612 | |
| ykuroda | 0:13a5d365ba16 | 613 | // Compute the eigenvector of index k |
| ykuroda | 0:13a5d365ba16 | 614 | { |
| ykuroda | 0:13a5d365ba16 | 615 | tmp.diagonal().array () -= eivals(k); |
| ykuroda | 0:13a5d365ba16 | 616 | // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector. |
| ykuroda | 0:13a5d365ba16 | 617 | extract_kernel(tmp, eivecs.col(k), eivecs.col(l)); |
| ykuroda | 0:13a5d365ba16 | 618 | } |
| ykuroda | 0:13a5d365ba16 | 619 | |
| ykuroda | 0:13a5d365ba16 | 620 | // Compute eigenvector of index l |
| ykuroda | 0:13a5d365ba16 | 621 | if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1) |
| ykuroda | 0:13a5d365ba16 | 622 | { |
| ykuroda | 0:13a5d365ba16 | 623 | // If d0 is too small, then the two other eigenvalues are numerically the same, |
| ykuroda | 0:13a5d365ba16 | 624 | // and thus we only have to ortho-normalize the near orthogonal vector we saved above. |
| ykuroda | 0:13a5d365ba16 | 625 | eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(k); |
| ykuroda | 0:13a5d365ba16 | 626 | eivecs.col(l).normalize(); |
| ykuroda | 0:13a5d365ba16 | 627 | } |
| ykuroda | 0:13a5d365ba16 | 628 | else |
| ykuroda | 0:13a5d365ba16 | 629 | { |
| ykuroda | 0:13a5d365ba16 | 630 | tmp = scaledMat; |
| ykuroda | 0:13a5d365ba16 | 631 | tmp.diagonal().array () -= eivals(l); |
| ykuroda | 0:13a5d365ba16 | 632 | |
| ykuroda | 0:13a5d365ba16 | 633 | VectorType dummy; |
| ykuroda | 0:13a5d365ba16 | 634 | extract_kernel(tmp, eivecs.col(l), dummy); |
| ykuroda | 0:13a5d365ba16 | 635 | } |
| ykuroda | 0:13a5d365ba16 | 636 | |
| ykuroda | 0:13a5d365ba16 | 637 | // Compute last eigenvector from the other two |
| ykuroda | 0:13a5d365ba16 | 638 | eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized(); |
| ykuroda | 0:13a5d365ba16 | 639 | } |
| ykuroda | 0:13a5d365ba16 | 640 | } |
| ykuroda | 0:13a5d365ba16 | 641 | |
| ykuroda | 0:13a5d365ba16 | 642 | // Rescale back to the original size. |
| ykuroda | 0:13a5d365ba16 | 643 | eivals *= scale; |
| ykuroda | 0:13a5d365ba16 | 644 | eivals.array() += shift; |
| ykuroda | 0:13a5d365ba16 | 645 | |
| ykuroda | 0:13a5d365ba16 | 646 | solver.m_info = Success; |
| ykuroda | 0:13a5d365ba16 | 647 | solver.m_isInitialized = true; |
| ykuroda | 0:13a5d365ba16 | 648 | solver.m_eigenvectorsOk = computeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 649 | } |
| ykuroda | 0:13a5d365ba16 | 650 | }; |
| ykuroda | 0:13a5d365ba16 | 651 | |
| ykuroda | 0:13a5d365ba16 | 652 | // 2x2 direct eigenvalues decomposition, code from Hauke Heibel |
| ykuroda | 0:13a5d365ba16 | 653 | template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2,false> |
| ykuroda | 0:13a5d365ba16 | 654 | { |
| ykuroda | 0:13a5d365ba16 | 655 | typedef typename SolverType::MatrixType MatrixType; |
| ykuroda | 0:13a5d365ba16 | 656 | typedef typename SolverType::RealVectorType VectorType; |
| ykuroda | 0:13a5d365ba16 | 657 | typedef typename SolverType::Scalar Scalar; |
| ykuroda | 0:13a5d365ba16 | 658 | typedef typename SolverType::EigenvectorsType EigenvectorsType; |
| ykuroda | 0:13a5d365ba16 | 659 | |
| ykuroda | 0:13a5d365ba16 | 660 | static inline void computeRoots(const MatrixType& m, VectorType& roots) |
| ykuroda | 0:13a5d365ba16 | 661 | { |
| ykuroda | 0:13a5d365ba16 | 662 | using std::sqrt; |
| ykuroda | 0:13a5d365ba16 | 663 | const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0))); |
| ykuroda | 0:13a5d365ba16 | 664 | const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1)); |
| ykuroda | 0:13a5d365ba16 | 665 | roots(0) = t1 - t0; |
| ykuroda | 0:13a5d365ba16 | 666 | roots(1) = t1 + t0; |
| ykuroda | 0:13a5d365ba16 | 667 | } |
| ykuroda | 0:13a5d365ba16 | 668 | |
| ykuroda | 0:13a5d365ba16 | 669 | static inline void run(SolverType& solver, const MatrixType& mat, int options) |
| ykuroda | 0:13a5d365ba16 | 670 | { |
| ykuroda | 0:13a5d365ba16 | 671 | using std::sqrt; |
| ykuroda | 0:13a5d365ba16 | 672 | using std::abs; |
| ykuroda | 0:13a5d365ba16 | 673 | |
| ykuroda | 0:13a5d365ba16 | 674 | eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows()); |
| ykuroda | 0:13a5d365ba16 | 675 | eigen_assert((options&~(EigVecMask|GenEigMask))==0 |
| ykuroda | 0:13a5d365ba16 | 676 | && (options&EigVecMask)!=EigVecMask |
| ykuroda | 0:13a5d365ba16 | 677 | && "invalid option parameter"); |
| ykuroda | 0:13a5d365ba16 | 678 | bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 679 | |
| ykuroda | 0:13a5d365ba16 | 680 | EigenvectorsType& eivecs = solver.m_eivec; |
| ykuroda | 0:13a5d365ba16 | 681 | VectorType& eivals = solver.m_eivalues; |
| ykuroda | 0:13a5d365ba16 | 682 | |
| ykuroda | 0:13a5d365ba16 | 683 | // map the matrix coefficients to [-1:1] to avoid over- and underflow. |
| ykuroda | 0:13a5d365ba16 | 684 | Scalar scale = mat.cwiseAbs().maxCoeff(); |
| ykuroda | 0:13a5d365ba16 | 685 | scale = (std::max)(scale,Scalar(1)); |
| ykuroda | 0:13a5d365ba16 | 686 | MatrixType scaledMat = mat / scale; |
| ykuroda | 0:13a5d365ba16 | 687 | |
| ykuroda | 0:13a5d365ba16 | 688 | // Compute the eigenvalues |
| ykuroda | 0:13a5d365ba16 | 689 | computeRoots(scaledMat,eivals); |
| ykuroda | 0:13a5d365ba16 | 690 | |
| ykuroda | 0:13a5d365ba16 | 691 | // compute the eigen vectors |
| ykuroda | 0:13a5d365ba16 | 692 | if(computeEigenvectors) |
| ykuroda | 0:13a5d365ba16 | 693 | { |
| ykuroda | 0:13a5d365ba16 | 694 | if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon()) |
| ykuroda | 0:13a5d365ba16 | 695 | { |
| ykuroda | 0:13a5d365ba16 | 696 | eivecs.setIdentity(); |
| ykuroda | 0:13a5d365ba16 | 697 | } |
| ykuroda | 0:13a5d365ba16 | 698 | else |
| ykuroda | 0:13a5d365ba16 | 699 | { |
| ykuroda | 0:13a5d365ba16 | 700 | scaledMat.diagonal().array () -= eivals(1); |
| ykuroda | 0:13a5d365ba16 | 701 | Scalar a2 = numext::abs2(scaledMat(0,0)); |
| ykuroda | 0:13a5d365ba16 | 702 | Scalar c2 = numext::abs2(scaledMat(1,1)); |
| ykuroda | 0:13a5d365ba16 | 703 | Scalar b2 = numext::abs2(scaledMat(1,0)); |
| ykuroda | 0:13a5d365ba16 | 704 | if(a2>c2) |
| ykuroda | 0:13a5d365ba16 | 705 | { |
| ykuroda | 0:13a5d365ba16 | 706 | eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0); |
| ykuroda | 0:13a5d365ba16 | 707 | eivecs.col(1) /= sqrt(a2+b2); |
| ykuroda | 0:13a5d365ba16 | 708 | } |
| ykuroda | 0:13a5d365ba16 | 709 | else |
| ykuroda | 0:13a5d365ba16 | 710 | { |
| ykuroda | 0:13a5d365ba16 | 711 | eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0); |
| ykuroda | 0:13a5d365ba16 | 712 | eivecs.col(1) /= sqrt(c2+b2); |
| ykuroda | 0:13a5d365ba16 | 713 | } |
| ykuroda | 0:13a5d365ba16 | 714 | |
| ykuroda | 0:13a5d365ba16 | 715 | eivecs.col(0) << eivecs.col(1).unitOrthogonal(); |
| ykuroda | 0:13a5d365ba16 | 716 | } |
| ykuroda | 0:13a5d365ba16 | 717 | } |
| ykuroda | 0:13a5d365ba16 | 718 | |
| ykuroda | 0:13a5d365ba16 | 719 | // Rescale back to the original size. |
| ykuroda | 0:13a5d365ba16 | 720 | eivals *= scale; |
| ykuroda | 0:13a5d365ba16 | 721 | |
| ykuroda | 0:13a5d365ba16 | 722 | solver.m_info = Success; |
| ykuroda | 0:13a5d365ba16 | 723 | solver.m_isInitialized = true; |
| ykuroda | 0:13a5d365ba16 | 724 | solver.m_eigenvectorsOk = computeEigenvectors; |
| ykuroda | 0:13a5d365ba16 | 725 | } |
| ykuroda | 0:13a5d365ba16 | 726 | }; |
| ykuroda | 0:13a5d365ba16 | 727 | |
| ykuroda | 0:13a5d365ba16 | 728 | } |
| ykuroda | 0:13a5d365ba16 | 729 | |
| ykuroda | 0:13a5d365ba16 | 730 | template<typename MatrixType> |
| ykuroda | 0:13a5d365ba16 | 731 | SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType> |
| ykuroda | 0:13a5d365ba16 | 732 | ::computeDirect(const MatrixType& matrix, int options) |
| ykuroda | 0:13a5d365ba16 | 733 | { |
| ykuroda | 0:13a5d365ba16 | 734 | internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options); |
| ykuroda | 0:13a5d365ba16 | 735 | return *this; |
| ykuroda | 0:13a5d365ba16 | 736 | } |
| ykuroda | 0:13a5d365ba16 | 737 | |
| ykuroda | 0:13a5d365ba16 | 738 | namespace internal { |
| ykuroda | 0:13a5d365ba16 | 739 | template<typename RealScalar, typename Scalar, typename Index> |
| ykuroda | 0:13a5d365ba16 | 740 | static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) |
| ykuroda | 0:13a5d365ba16 | 741 | { |
| ykuroda | 0:13a5d365ba16 | 742 | using std::abs; |
| ykuroda | 0:13a5d365ba16 | 743 | RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); |
| ykuroda | 0:13a5d365ba16 | 744 | RealScalar e = subdiag[end-1]; |
| ykuroda | 0:13a5d365ba16 | 745 | // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still |
| ykuroda | 0:13a5d365ba16 | 746 | // underflow thus leading to inf/NaN values when using the following commented code: |
| ykuroda | 0:13a5d365ba16 | 747 | // RealScalar e2 = numext::abs2(subdiag[end-1]); |
| ykuroda | 0:13a5d365ba16 | 748 | // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); |
| ykuroda | 0:13a5d365ba16 | 749 | // This explains the following, somewhat more complicated, version: |
| ykuroda | 0:13a5d365ba16 | 750 | RealScalar mu = diag[end]; |
| ykuroda | 0:13a5d365ba16 | 751 | if(td==0) |
| ykuroda | 0:13a5d365ba16 | 752 | mu -= abs(e); |
| ykuroda | 0:13a5d365ba16 | 753 | else |
| ykuroda | 0:13a5d365ba16 | 754 | { |
| ykuroda | 0:13a5d365ba16 | 755 | RealScalar e2 = numext::abs2(subdiag[end-1]); |
| ykuroda | 0:13a5d365ba16 | 756 | RealScalar h = numext::hypot(td,e); |
| ykuroda | 0:13a5d365ba16 | 757 | if(e2==0) mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h); |
| ykuroda | 0:13a5d365ba16 | 758 | else mu -= e2 / (td + (td>0 ? h : -h)); |
| ykuroda | 0:13a5d365ba16 | 759 | } |
| ykuroda | 0:13a5d365ba16 | 760 | |
| ykuroda | 0:13a5d365ba16 | 761 | RealScalar x = diag[start] - mu; |
| ykuroda | 0:13a5d365ba16 | 762 | RealScalar z = subdiag[start]; |
| ykuroda | 0:13a5d365ba16 | 763 | for (Index k = start; k < end; ++k) |
| ykuroda | 0:13a5d365ba16 | 764 | { |
| ykuroda | 0:13a5d365ba16 | 765 | JacobiRotation<RealScalar> rot; |
| ykuroda | 0:13a5d365ba16 | 766 | rot.makeGivens(x, z); |
| ykuroda | 0:13a5d365ba16 | 767 | |
| ykuroda | 0:13a5d365ba16 | 768 | // do T = G' T G |
| ykuroda | 0:13a5d365ba16 | 769 | RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k]; |
| ykuroda | 0:13a5d365ba16 | 770 | RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1]; |
| ykuroda | 0:13a5d365ba16 | 771 | |
| ykuroda | 0:13a5d365ba16 | 772 | diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]); |
| ykuroda | 0:13a5d365ba16 | 773 | diag[k+1] = rot.s() * sdk + rot.c() * dkp1; |
| ykuroda | 0:13a5d365ba16 | 774 | subdiag[k] = rot.c() * sdk - rot.s() * dkp1; |
| ykuroda | 0:13a5d365ba16 | 775 | |
| ykuroda | 0:13a5d365ba16 | 776 | |
| ykuroda | 0:13a5d365ba16 | 777 | if (k > start) |
| ykuroda | 0:13a5d365ba16 | 778 | subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z; |
| ykuroda | 0:13a5d365ba16 | 779 | |
| ykuroda | 0:13a5d365ba16 | 780 | x = subdiag[k]; |
| ykuroda | 0:13a5d365ba16 | 781 | |
| ykuroda | 0:13a5d365ba16 | 782 | if (k < end - 1) |
| ykuroda | 0:13a5d365ba16 | 783 | { |
| ykuroda | 0:13a5d365ba16 | 784 | z = -rot.s() * subdiag[k+1]; |
| ykuroda | 0:13a5d365ba16 | 785 | subdiag[k + 1] = rot.c() * subdiag[k+1]; |
| ykuroda | 0:13a5d365ba16 | 786 | } |
| ykuroda | 0:13a5d365ba16 | 787 | |
| ykuroda | 0:13a5d365ba16 | 788 | // apply the givens rotation to the unit matrix Q = Q * G |
| ykuroda | 0:13a5d365ba16 | 789 | if (matrixQ) |
| ykuroda | 0:13a5d365ba16 | 790 | { |
| ykuroda | 0:13a5d365ba16 | 791 | Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n); |
| ykuroda | 0:13a5d365ba16 | 792 | q.applyOnTheRight(k,k+1,rot); |
| ykuroda | 0:13a5d365ba16 | 793 | } |
| ykuroda | 0:13a5d365ba16 | 794 | } |
| ykuroda | 0:13a5d365ba16 | 795 | } |
| ykuroda | 0:13a5d365ba16 | 796 | |
| ykuroda | 0:13a5d365ba16 | 797 | } // end namespace internal |
| ykuroda | 0:13a5d365ba16 | 798 | |
| ykuroda | 0:13a5d365ba16 | 799 | } // end namespace Eigen |
| ykuroda | 0:13a5d365ba16 | 800 | |
| ykuroda | 0:13a5d365ba16 | 801 | #endif // EIGEN_SELFADJOINTEIGENSOLVER_H |
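To complement the listing, the following sketch contrasts the two computation paths implemented above: the iterative `compute()` path (tridiagonalization followed by implicit symmetric QR steps with Wilkinson shift) and the closed-form `computeDirect()` path provided by the `direct_selfadjoint_eigenvalues` specializations for fixed-size 2x2 and 3x3 real matrices. The matrix and printed labels are illustrative and not part of the annotated revision.

```cpp
#include <iostream>
#include <Eigen/Dense>

int main()
{
  Eigen::Matrix2f M;
  M << 4, 1,
       1, 3;

  // Default constructor is fine for fixed-size types; compute() is called later.
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix2f> es;

  // Iterative path: symmetric QR with Wilkinson shift on the tridiagonal form.
  es.compute(M);
  std::cout << "compute():       " << es.eigenvalues().transpose() << "\n";

  // Direct path: solves the characteristic polynomial in closed form.
  // Usually faster for 2x2/3x3, but possibly slightly less accurate.
  es.computeDirect(M);
  std::cout << "computeDirect(): " << es.eigenvalues().transpose() << "\n";

  // Eigenvalues only, when the eigenvectors are not needed (cheaper).
  es.compute(M, Eigen::EigenvaluesOnly);
  std::cout << "values only:     " << es.eigenvalues().transpose() << "\n";
  return 0;
}
```

For the 2x2 specialization, the closed form used by `computeRoots()` is simply t1 ± t0 with t1 = (m(0,0) + m(1,1))/2 and t0 = sqrt((m(0,0) − m(1,1))² + 4·m(1,0)²)/2, which matches the roots printed by the direct path above.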