Eigen Matrix Class Library

SelfAdjointEigenSolver.h

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
#define EIGEN_SELFADJOINTEIGENSOLVER_H

#include "./Tridiagonalization.h"

namespace Eigen {

template<typename _MatrixType>
class GeneralizedSelfAdjointEigenSolver;

namespace internal {
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
}

/** \eigenvalues_module \ingroup Eigenvalues_Module
  *
  *
  * \class SelfAdjointEigenSolver
  *
  * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
  *
  * \tparam _MatrixType the type of the matrix of which we are computing the
  * eigendecomposition; this is expected to be an instantiation of the Matrix
  * class template.
  *
  * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
  * matrices, this means that the matrix is symmetric: it equals its
  * transpose. This class computes the eigenvalues and eigenvectors of a
  * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
  * \f$ v \f$ such that \f$ Av = \lambda v \f$.  The eigenvalues of a
  * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
  * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
  * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
  * matrices, the matrix \f$ V \f$ is always invertible). This is called the
  * eigendecomposition.
  *
  * The algorithm exploits the fact that the matrix is selfadjoint, making it
  * faster and more accurate than the general purpose eigenvalue algorithms
  * implemented in EigenSolver and ComplexEigenSolver.
  *
  * Only the \b lower \b triangular \b part of the input matrix is referenced.
  *
  * Call the function compute() to compute the eigenvalues and eigenvectors of
  * a given matrix. Alternatively, you can use the
  * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
  * the eigenvalues and eigenvectors at construction time. Once the eigenvalues
  * and eigenvectors are computed, they can be retrieved with the eigenvalues()
  * and eigenvectors() functions.
  *
  * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
  * contains an example of the typical use of this class.
  *
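  * A minimal, self-contained usage sketch (illustrative only; it is not one of
  * the shipped snippet files, and the matrix values are arbitrary):
  * \code
  * #include <iostream>
  * #include <Eigen/Dense>
  *
  * int main()
  * {
  *   Eigen::Matrix2d A;
  *   A << 2, 1,
  *        1, 2;                       // symmetric, hence selfadjoint
  *   Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
  *   std::cout << "Eigenvalues:\n"  << es.eigenvalues()  << "\n";
  *   std::cout << "Eigenvectors:\n" << es.eigenvectors() << "\n";
  * }
  * \endcode
  *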
  * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
  * the like, see the class GeneralizedSelfAdjointEigenSolver.
  *
  * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
  */
template<typename _MatrixType> class SelfAdjointEigenSolver
{
  public:

    typedef _MatrixType MatrixType;
    enum {
      Size = MatrixType::RowsAtCompileTime,
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      Options = MatrixType::Options,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };

    /** \brief Scalar type for matrices of type \p _MatrixType. */
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;

    typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;

    /** \brief Real scalar type for \p _MatrixType.
      *
      * This is just \c Scalar if #Scalar is real (e.g., \c float or
      * \c double), and the type of the real part of \c Scalar if #Scalar is
      * complex.
      */
    typedef typename NumTraits<Scalar>::Real RealScalar;

    friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;

    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
      *
      * This is a column vector with entries of type #RealScalar.
      * The length of the vector is the size of \p _MatrixType.
      */
    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
    typedef Tridiagonalization<MatrixType> TridiagonalizationType;

    /** \brief Default constructor for fixed-size matrices.
      *
      * The default constructor is useful in cases in which the user intends to
      * perform decompositions via compute(). This constructor
      * can only be used if \p _MatrixType is a fixed-size matrix; use
      * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
      *
      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
      */
    SelfAdjointEigenSolver()
        : m_eivec(),
          m_eivalues(),
          m_subdiag(),
          m_isInitialized(false)
    { }

    /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
      *
      * \param [in]  size  Positive integer, size of the matrix whose
      * eigenvalues and eigenvectors will be computed.
      *
      * This constructor is useful for dynamic-size matrices, when the user
      * intends to perform decompositions via compute(). The \p size
      * parameter is only used as a hint. It is not an error to give a wrong
      * \p size, but it may impair performance.
      *
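      * A sketch of the intended pattern (illustrative; the size \c n, the
      * matrices \c A[i] and the loop are hypothetical):
      * \code
      * Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(n);   // pre-allocate once
      * for (int i = 0; i < 10; ++i)
      *   es.compute(A[i]);   // no re-allocation as long as A[i] stays n-by-n
      * \endcode
      *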
      * \sa compute() for an example
      */
    SelfAdjointEigenSolver(Index size)
        : m_eivec(size, size),
          m_eivalues(size),
          m_subdiag(size > 1 ? size - 1 : 1),
          m_isInitialized(false)
    {}

    /** \brief Constructor; computes eigendecomposition of given matrix.
      *
      * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
      *    be computed. Only the lower triangular part of the matrix is referenced.
      * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
      *
      * This constructor calls compute(const MatrixType&, int) to compute the
      * eigenvalues of the matrix \p matrix. The eigenvectors are computed if
      * \p options equals #ComputeEigenvectors.
      *
      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
      *
      * \sa compute(const MatrixType&, int)
      */
    SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors)
      : m_eivec(matrix.rows(), matrix.cols()),
        m_eivalues(matrix.cols()),
        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
        m_isInitialized(false)
    {
      compute(matrix, options);
    }

    /** \brief Computes eigendecomposition of given matrix.
      *
      * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
      *    be computed. Only the lower triangular part of the matrix is referenced.
      * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
      * \returns    Reference to \c *this
      *
      * This function computes the eigenvalues of \p matrix.  The eigenvalues()
      * function can be used to retrieve them.  If \p options equals #ComputeEigenvectors,
      * then the eigenvectors are also computed and can be retrieved by
      * calling eigenvectors().
      *
      * This implementation uses a symmetric QR algorithm. The matrix is first
      * reduced to tridiagonal form using the Tridiagonalization class. The
      * tridiagonal matrix is then brought to diagonal form with implicit
      * symmetric QR steps with Wilkinson shift. Details can be found in
      * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
      *
      * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
      * are required and \f$ 4n^3/3 \f$ if they are not required.
      *
      * This method reuses the memory in the SelfAdjointEigenSolver object that
      * was allocated when the object was constructed, if the size of the
      * matrix does not change.
      *
      * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
      *
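      * A sketch of re-using one solver object (illustrative; \c A and \c B are
      * assumed to be selfadjoint matrices of the same size):
      * \code
      * Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);   // eigenvalues and eigenvectors of A
      * es.compute(B, Eigen::EigenvaluesOnly);                  // storage is re-used, eigenvalues only
      * if (es.info() == Eigen::Success)
      *   std::cout << es.eigenvalues() << "\n";
      * \endcode
      *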
      * \sa SelfAdjointEigenSolver(const MatrixType&, int)
      */
    SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors);

    /** \brief Computes eigendecomposition of given matrix using a direct algorithm
      *
      * This is a variant of compute(const MatrixType&, int options) which
      * directly solves the underlying polynomial equation.
      *
      * Currently only 2x2 and 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d).
      *
      * This method is usually significantly faster than the QR algorithm
      * but it might also be less accurate. It is also worth noting that
      * for 3x3 matrices it involves trigonometric operations which are
      * not necessarily available for all scalar types.
      *
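      * A sketch (illustrative; the matrix values are arbitrary):
      * \code
      * Eigen::Matrix3f A;
      * A << 4, 1, 0,
      *      1, 3, 1,
      *      0, 1, 2;
      * Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
      * es.computeDirect(A);   // closed-form path for fixed-size matrices
      * \endcode
      *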
      * \sa compute(const MatrixType&, int options)
      */
    SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);

    /** \brief Returns the eigenvectors of given matrix.
      *
      * \returns  A const reference to the matrix whose columns are the eigenvectors.
      *
      * \pre The eigenvectors have been computed before.
      *
      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
      * eigenvectors are normalized to have (Euclidean) norm equal to one. If
      * this object was used to solve the eigenproblem for the selfadjoint
      * matrix \f$ A \f$, then the matrix returned by this function is the
      * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
      *
      * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
      *
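      * A sketch of checking one eigenpair (illustrative; \c A is assumed to be
      * a selfadjoint \c MatrixXd that was passed to the solver \c es):
      * \code
      * Eigen::VectorXd v      = es.eigenvectors().col(0);
      * double          lambda = es.eigenvalues()(0);
      * std::cout << "residual: " << (A * v - lambda * v).norm() << "\n";   // close to 0
      * \endcode
      *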
      * \sa eigenvalues()
      */
    const EigenvectorsType& eigenvectors() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
      return m_eivec;
    }

    /** \brief Returns the eigenvalues of given matrix.
      *
      * \returns A const reference to the column vector containing the eigenvalues.
      *
      * \pre The eigenvalues have been computed before.
      *
      * The eigenvalues are repeated according to their algebraic multiplicity,
      * so there are as many eigenvalues as rows in the matrix. The eigenvalues
      * are sorted in increasing order.
      *
      * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
      *
      * \sa eigenvectors(), MatrixBase::eigenvalues()
      */
    const RealVectorType& eigenvalues() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      return m_eivalues;
    }

    /** \brief Computes the positive-definite square root of the matrix.
      *
      * \returns the positive-definite square root of the matrix
      *
      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
      * have been computed before.
      *
      * The square root of a positive-definite matrix \f$ A \f$ is the
      * positive-definite matrix whose square equals \f$ A \f$. This function
      * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
      * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
      *
      * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
      *
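      * A sketch of the identity it satisfies (illustrative; \c A is assumed to be
      * a positive-definite matrix already passed to the solver \c es):
      * \code
      * Eigen::MatrixXd sqrtA = es.operatorSqrt();
      * std::cout << (sqrtA * sqrtA - A).norm() << "\n";   // close to 0
      * \endcode
      *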
      * \sa operatorInverseSqrt(),
      *     \ref MatrixFunctions_Module "MatrixFunctions Module"
      */
    MatrixType operatorSqrt() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
      return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
    }

    /** \brief Computes the inverse square root of the matrix.
      *
      * \returns the inverse positive-definite square root of the matrix
      *
      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
      * have been computed before.
      *
      * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
      * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
      * cheaper than first computing the square root with operatorSqrt() and
      * then its inverse with MatrixBase::inverse().
      *
      * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
      *
      * \sa operatorSqrt(), MatrixBase::inverse(),
      *     \ref MatrixFunctions_Module "MatrixFunctions Module"
      */
    MatrixType operatorInverseSqrt() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
      return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
    }

    /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      return m_info;
    }

    /** \brief Maximum number of iterations.
      *
      * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
      * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
      */
    static const int m_maxIterations = 30;

    #ifdef EIGEN2_SUPPORT
    SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors)
      : m_eivec(matrix.rows(), matrix.cols()),
        m_eivalues(matrix.cols()),
        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
        m_isInitialized(false)
    {
      compute(matrix, computeEigenvectors);
    }

    SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
        : m_eivec(matA.cols(), matA.cols()),
          m_eivalues(matA.cols()),
          m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1),
          m_isInitialized(false)
    {
      static_cast<GeneralizedSelfAdjointEigenSolver<MatrixType>*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
    }

    void compute(const MatrixType& matrix, bool computeEigenvectors)
    {
      compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
    }

    void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
    {
      compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
    }
    #endif // EIGEN2_SUPPORT

  protected:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
    }

    EigenvectorsType m_eivec;
    RealVectorType m_eivalues;
    typename TridiagonalizationType::SubDiagonalType m_subdiag;
    ComputationInfo m_info;
    bool m_isInitialized;
    bool m_eigenvectorsOk;
};

/** \internal
  *
  * \eigenvalues_module \ingroup Eigenvalues_Module
  *
  * Performs a QR step on a tridiagonal symmetric matrix represented as a
  * pair of two vectors \a diag and \a subdiag.
  *
  * \param diag the diagonal entries of the tridiagonal matrix
  * \param subdiag the sub-diagonal entries of the tridiagonal matrix
  * \param start, end the bounds of the unreduced block on which the step is applied
  * \param matrixQ pointer to the matrix accumulating the transformations (may be null)
  * \param n size of the full problem
  *
  * For compilation efficiency reasons, this procedure does not use Eigen expressions
  * for its arguments.
  *
  * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
  * "implicit symmetric QR step with Wilkinson shift"
  */
namespace internal {
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}

template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
  check_template_parameters();

  using std::abs;
  eigen_assert(matrix.cols() == matrix.rows());
  eigen_assert((options&~(EigVecMask|GenEigMask))==0
          && (options&EigVecMask)!=EigVecMask
          && "invalid option parameter");
  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
  Index n = matrix.cols();
  m_eivalues.resize(n,1);

  if(n==1)
  {
    m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
    if(computeEigenvectors)
      m_eivec.setOnes(n,n);
    m_info = Success;
    m_isInitialized = true;
    m_eigenvectorsOk = computeEigenvectors;
    return *this;
  }

  // declare some aliases
  RealVectorType& diag = m_eivalues;
  EigenvectorsType& mat = m_eivec;

  // map the matrix coefficients to [-1:1] to avoid over- and underflow.
  mat = matrix.template triangularView<Lower>();
  RealScalar scale = mat.cwiseAbs().maxCoeff();
  if(scale==RealScalar(0)) scale = RealScalar(1);
  mat.template triangularView<Lower>() /= scale;
  m_subdiag.resize(n-1);
  internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);

  Index end = n-1;
  Index start = 0;
  Index iter = 0; // total number of iterations

  while (end>0)
  {
    for (Index i = start; i<end; ++i)
      if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1]))))
        m_subdiag[i] = 0;

    // find the largest unreduced block
    while (end>0 && m_subdiag[end-1]==0)
    {
      end--;
    }
    if (end<=0)
      break;

    // if we spent too many iterations, we give up
    iter++;
    if(iter > m_maxIterations * n) break;

    start = end - 1;
    while (start>0 && m_subdiag[start-1]!=0)
      start--;

    internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
  }

  if (iter <= m_maxIterations * n)
    m_info = Success;
  else
    m_info = NoConvergence;

  // Sort eigenvalues and corresponding vectors.
  // TODO make the sort optional ?
  // TODO use a better sort algorithm !!
  if (m_info == Success)
  {
    for (Index i = 0; i < n-1; ++i)
    {
      Index k;
      m_eivalues.segment(i,n-i).minCoeff(&k);
      if (k > 0)
      {
        std::swap(m_eivalues[i], m_eivalues[k+i]);
        if(computeEigenvectors)
          m_eivec.col(i).swap(m_eivec.col(k+i));
      }
    }
  }

  // scale back the eigenvalues
  m_eivalues *= scale;

  m_isInitialized = true;
  m_eigenvectorsOk = computeEigenvectors;
  return *this;
}


namespace internal {

template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
{
  static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
  { eig.compute(A,options); }
};

template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
{
  typedef typename SolverType::MatrixType MatrixType;
  typedef typename SolverType::RealVectorType VectorType;
  typedef typename SolverType::Scalar Scalar;
  typedef typename MatrixType::Index Index;
  typedef typename SolverType::EigenvectorsType EigenvectorsType;

  /** \internal
   * Computes the roots of the characteristic polynomial of \a m.
   * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
   */
  static inline void computeRoots(const MatrixType& m, VectorType& roots)
  {
    using std::sqrt;
    using std::atan2;
    using std::cos;
    using std::sin;
    const Scalar s_inv3 = Scalar(1.0)/Scalar(3.0);
    const Scalar s_sqrt3 = sqrt(Scalar(3.0));

    // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0.  The
    // eigenvalues are the roots to this equation, all guaranteed to be
    // real-valued, because the matrix is symmetric.
    Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
    Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
    Scalar c2 = m(0,0) + m(1,1) + m(2,2);

    // Construct the parameters used in classifying the roots of the equation
    // and in solving the equation for the roots in closed form.
    Scalar c2_over_3 = c2*s_inv3;
    Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
    if(a_over_3<Scalar(0))
      a_over_3 = Scalar(0);

    Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));

    Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
    if(q<Scalar(0))
      q = Scalar(0);

    // Compute the eigenvalues by solving for the roots of the polynomial.
    Scalar rho = sqrt(a_over_3);
    Scalar theta = atan2(sqrt(q),half_b)*s_inv3;  // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
    Scalar cos_theta = cos(theta);
    Scalar sin_theta = sin(theta);
    // roots are already sorted, since cos is monotonically decreasing on [0, pi]
    roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
    roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
    roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
  }

  static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
  {
    using std::abs;
    Index i0;
    // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
    mat.diagonal().cwiseAbs().maxCoeff(&i0);
    // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
    // so let's save it:
    representative = mat.col(i0);
    Scalar n0, n1;
    VectorType c0, c1;
    n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
    n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
    if(n0>n1) res = c0/std::sqrt(n0);
    else      res = c1/std::sqrt(n1);

    return true;
  }

  static inline void run(SolverType& solver, const MatrixType& mat, int options)
  {
    eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
    eigen_assert((options&~(EigVecMask|GenEigMask))==0
            && (options&EigVecMask)!=EigVecMask
            && "invalid option parameter");
    bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;

    EigenvectorsType& eivecs = solver.m_eivec;
    VectorType& eivals = solver.m_eivalues;

    // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
    Scalar shift = mat.trace() / Scalar(3);
    // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
    MatrixType scaledMat = mat.template selfadjointView<Lower>();
    scaledMat.diagonal().array() -= shift;
    Scalar scale = scaledMat.cwiseAbs().maxCoeff();
    if(scale > 0) scaledMat /= scale;   // TODO for scale==0 we could save the remaining operations

    // compute the eigenvalues
    computeRoots(scaledMat,eivals);

    // compute the eigenvectors
    if(computeEigenvectors)
    {
      if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
      {
        // All three eigenvalues are numerically the same
        eivecs.setIdentity();
      }
      else
      {
        MatrixType tmp;
        tmp = scaledMat;

        // Compute the eigenvector of the most distinct eigenvalue
        Scalar d0 = eivals(2) - eivals(1);
        Scalar d1 = eivals(1) - eivals(0);
        Index k(0), l(2);
        if(d0 > d1)
        {
          std::swap(k,l);
          d0 = d1;
        }

        // Compute the eigenvector of index k
        {
          tmp.diagonal().array() -= eivals(k);
          // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
          extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
        }

        // Compute eigenvector of index l
        if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
        {
          // If d0 is too small, then the two other eigenvalues are numerically the same,
          // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
          eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
          eivecs.col(l).normalize();
        }
        else
        {
          tmp = scaledMat;
          tmp.diagonal().array() -= eivals(l);

          VectorType dummy;
          extract_kernel(tmp, eivecs.col(l), dummy);
        }

        // Compute last eigenvector from the other two
        eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
      }
    }

    // Rescale back to the original size.
    eivals *= scale;
    eivals.array() += shift;

    solver.m_info = Success;
    solver.m_isInitialized = true;
    solver.m_eigenvectorsOk = computeEigenvectors;
  }
};

// 2x2 direct eigenvalue decomposition, code from Hauke Heibel
template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2,false>
{
  typedef typename SolverType::MatrixType MatrixType;
  typedef typename SolverType::RealVectorType VectorType;
  typedef typename SolverType::Scalar Scalar;
  typedef typename SolverType::EigenvectorsType EigenvectorsType;

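  // Note (added for clarity): for a symmetric 2x2 matrix [a b; b c] the closed-form
  // eigenvalues are lambda = (a + c)/2 +- sqrt( ((a - c)/2)^2 + b^2 ); computeRoots()
  // below evaluates exactly this as t1 -+ t0, so that roots(0) <= roots(1).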
  static inline void computeRoots(const MatrixType& m, VectorType& roots)
  {
    using std::sqrt;
    const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
    const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
    roots(0) = t1 - t0;
    roots(1) = t1 + t0;
  }

  static inline void run(SolverType& solver, const MatrixType& mat, int options)
  {
    using std::sqrt;
    using std::abs;

    eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
    eigen_assert((options&~(EigVecMask|GenEigMask))==0
            && (options&EigVecMask)!=EigVecMask
            && "invalid option parameter");
    bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;

    EigenvectorsType& eivecs = solver.m_eivec;
    VectorType& eivals = solver.m_eivalues;

    // map the matrix coefficients to [-1:1] to avoid over- and underflow.
    Scalar scale = mat.cwiseAbs().maxCoeff();
    scale = (std::max)(scale,Scalar(1));
    MatrixType scaledMat = mat / scale;

    // Compute the eigenvalues
    computeRoots(scaledMat,eivals);

    // compute the eigenvectors
    if(computeEigenvectors)
    {
      if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
      {
        eivecs.setIdentity();
      }
      else
      {
        scaledMat.diagonal().array() -= eivals(1);
        Scalar a2 = numext::abs2(scaledMat(0,0));
        Scalar c2 = numext::abs2(scaledMat(1,1));
        Scalar b2 = numext::abs2(scaledMat(1,0));
        if(a2>c2)
        {
          eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
          eivecs.col(1) /= sqrt(a2+b2);
        }
        else
        {
          eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
          eivecs.col(1) /= sqrt(c2+b2);
        }

        eivecs.col(0) << eivecs.col(1).unitOrthogonal();
      }
    }

    // Rescale back to the original size.
    eivals *= scale;

    solver.m_info = Success;
    solver.m_isInitialized = true;
    solver.m_eigenvectorsOk = computeEigenvectors;
  }
};

}

template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::computeDirect(const MatrixType& matrix, int options)
{
  internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
  return *this;
}

namespace internal {
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
  using std::abs;
  RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
  RealScalar e = subdiag[end-1];
  // Note that thanks to scaling, e^2 or td^2 cannot overflow; however, they can still
  // underflow, thus leading to inf/NaN values when using the following commented code:
  //   RealScalar e2 = numext::abs2(subdiag[end-1]);
  //   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
  // This explains the following, somewhat more complicated, version:
  RealScalar mu = diag[end];
  if(td==0)
    mu -= abs(e);
  else
  {
    RealScalar e2 = numext::abs2(subdiag[end-1]);
    RealScalar h = numext::hypot(td,e);
    if(e2==0)  mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
    else       mu -= e2 / (td + (td>0 ? h : -h));
  }

  RealScalar x = diag[start] - mu;
  RealScalar z = subdiag[start];
  for (Index k = start; k < end; ++k)
  {
    JacobiRotation<RealScalar> rot;
    rot.makeGivens(x, z);

    // do T = G' T G
    RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
    RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];

    diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
    diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
    subdiag[k] = rot.c() * sdk - rot.s() * dkp1;

    if (k > start)
      subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;

    x = subdiag[k];

    if (k < end - 1)
    {
      z = -rot.s() * subdiag[k+1];
      subdiag[k + 1] = rot.c() * subdiag[k+1];
    }

    // apply the givens rotation to the unit matrix Q = Q * G
    if (matrixQ)
    {
      Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
      q.applyOnTheRight(k,k+1,rot);
    }
  }
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SELFADJOINTEIGENSOLVER_H