FullPivLU.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_LU_H
#define EIGEN_LU_H

namespace Eigen {

/** \ingroup LU_Module
  *
  * \class FullPivLU
  *
  * \brief LU decomposition of a matrix with complete pivoting, and related features
  *
  * \param MatrixType the type of the matrix of which we are computing the LU decomposition
  *
  * This class represents an LU decomposition of any matrix, with complete pivoting: the matrix A is
  * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
  * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
  * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
  * zeros are at the end.
  *
  * This decomposition provides the generic approach to solving systems of linear equations, computing
  * the rank, invertibility, inverse, kernel, and determinant.
  *
  * This LU decomposition is very stable and well tested with large matrices. However, there are use
  * cases where the SVD decomposition is inherently more stable and/or flexible. For example, when
  * computing the kernel of a matrix, working with the SVD allows one to select the smallest singular
  * values of the matrix, something that the LU decomposition does not see.
  *
  * The data of the LU decomposition can be directly accessed through the methods matrixLU(),
  * permutationP(), permutationQ().
  *
  * As an example, here is how the original matrix can be retrieved:
  * \include class_FullPivLU.cpp
  * Output: \verbinclude class_FullPivLU.out
  *
  * \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
  */
template<typename _MatrixType> class FullPivLU
{
  public:
    typedef _MatrixType MatrixType;
    enum {
      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      Options = MatrixType::Options,
      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };
    typedef typename MatrixType::Scalar Scalar;
    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
    typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
    typedef typename MatrixType::Index Index;
    typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
    typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;

    /**
      * \brief Default Constructor.
      *
      * The default constructor is useful in cases in which the user intends to
      * perform decompositions via LU::compute(const MatrixType&).
      */
    FullPivLU();

    /** \brief Default Constructor with memory preallocation
      *
      * Like the default constructor but with preallocation of the internal data
      * according to the specified problem \a size.
      * \sa FullPivLU()
      */
    FullPivLU(Index rows, Index cols);

    /** Constructor.
      *
      * \param matrix the matrix of which to compute the LU decomposition.
      *               It is required to be nonzero.
      */
    FullPivLU(const MatrixType& matrix);

    /** Computes the LU decomposition of the given matrix.
      *
      * \param matrix the matrix of which to compute the LU decomposition.
      *               It is required to be nonzero.
      *
      * \returns a reference to *this
      */
    FullPivLU& compute(const MatrixType& matrix);

    /** \returns the LU decomposition matrix: the upper-triangular part is U, the
      * unit-lower-triangular part is L (at least for square matrices; in the non-square
      * case, special care is needed, see the documentation of class FullPivLU).
      *
      * \sa matrixL(), matrixU()
      */
    inline const MatrixType& matrixLU() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return m_lu;
    }

    /** \returns the number of nonzero pivots in the LU decomposition.
      * Here nonzero is meant in the exact sense, not in a fuzzy sense.
      * So that notion isn't really intrinsically interesting, but it is
      * still useful when implementing algorithms.
      *
      * \sa rank()
      */
    inline Index nonzeroPivots() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return m_nonzero_pivots;
    }

    /** \returns the absolute value of the biggest pivot, i.e. the biggest
      * diagonal coefficient of U.
      */
    RealScalar maxPivot() const { return m_maxpivot; }

    /** \returns the permutation matrix P
      *
      * \sa permutationQ()
      */
    inline const PermutationPType& permutationP() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return m_p;
    }

    /** \returns the permutation matrix Q
      *
      * \sa permutationP()
      */
    inline const PermutationQType& permutationQ() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return m_q;
    }

    /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix
      * will form a basis of the kernel.
      *
      * \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      *
      * Example: \include FullPivLU_kernel.cpp
      * Output: \verbinclude FullPivLU_kernel.out
      *
      * \sa image()
      */
    inline const internal::kernel_retval<FullPivLU> kernel() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return internal::kernel_retval<FullPivLU>(*this);
    }

    /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix
      * will form a basis of the image.
      *
      * \param originalMatrix the original matrix, of which *this is the LU decomposition.
      *        The reason why it is needed to pass it here, is that this allows
      *        a large optimization, as otherwise this method would need to reconstruct it
      *        from the LU decomposition.
      *
      * \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      *
      * Example: \include FullPivLU_image.cpp
      * Output: \verbinclude FullPivLU_image.out
      *
      * \sa kernel()
      */
    inline const internal::image_retval<FullPivLU>
      image(const MatrixType& originalMatrix) const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return internal::image_retval<FullPivLU>(*this, originalMatrix);
    }

    /** \return a solution x to the equation Ax=b, where A is the matrix of which
      * *this is the LU decomposition.
      *
      * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
      *          the only requirement in order for the equation to make sense is that
      *          b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
      *
      * \returns a solution.
      *
      * \note_about_checking_solutions
      *
      * \note_about_arbitrary_choice_of_solution
      * \note_about_using_kernel_to_study_multiple_solutions
      *
      * Example: \include FullPivLU_solve.cpp
      * Output: \verbinclude FullPivLU_solve.out
      *
      * \sa TriangularView::solve(), kernel(), inverse()
      */
    template<typename Rhs>
    inline const internal::solve_retval<FullPivLU, Rhs>
    solve(const MatrixBase<Rhs>& b) const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return internal::solve_retval<FullPivLU, Rhs>(*this, b.derived());
    }

    /** \returns the determinant of the matrix of which
      * *this is the LU decomposition. It has only linear complexity
      * (that is, O(n) where n is the dimension of the square matrix)
      * as the LU decomposition has already been computed.
      *
      * \note This is only for square matrices.
      *
      * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
      *       optimized paths.
      *
      * \warning a determinant can be very big or small, so for matrices
      * of large enough dimension, there is a risk of overflow/underflow.
      *
      * \sa MatrixBase::determinant()
      */
    typename internal::traits<MatrixType>::Scalar determinant() const;

    /** Allows prescribing a threshold to be used by certain methods, such as rank(),
      * which need to determine when pivots are to be considered nonzero. This is not used for the
      * LU decomposition itself.
      *
      * When it needs to get the threshold value, Eigen calls threshold(). By default, this
      * uses a formula to automatically determine a reasonable threshold.
      * Once you have called the present method setThreshold(const RealScalar&),
      * your value is used instead.
      *
      * \param threshold The new value to use as the threshold.
      *
      * A pivot will be considered nonzero if its absolute value is strictly greater than
      * \f$ threshold \times \vert maxpivot \vert \f$
      * where maxpivot is the biggest pivot.
      *
      * If you want to come back to the default behavior, call setThreshold(Default_t).
      */
    FullPivLU& setThreshold(const RealScalar& threshold)
    {
      m_usePrescribedThreshold = true;
      m_prescribedThreshold = threshold;
      return *this;
    }

    /** Allows coming back to the default behavior, letting Eigen use its default formula for
      * determining the threshold.
      *
      * You should pass the special object Eigen::Default as parameter here.
      * \code lu.setThreshold(Eigen::Default); \endcode
      *
      * See the documentation of setThreshold(const RealScalar&).
      */
    FullPivLU& setThreshold(Default_t)
    {
      m_usePrescribedThreshold = false;
      return *this;
    }

    /** Returns the threshold that will be used by certain methods such as rank().
      *
      * See the documentation of setThreshold(const RealScalar&).
      */
    RealScalar threshold() const
    {
      eigen_assert(m_isInitialized || m_usePrescribedThreshold);
      return m_usePrescribedThreshold ? m_prescribedThreshold
      // this formula comes from experimenting (see "LU precision tuning" thread on the list)
      // and turns out to be identical to Higham's formula used already in LDLt.
                                      : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
    }

    /** \returns the rank of the matrix of which *this is the LU decomposition.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      */
    inline Index rank() const
    {
      using std::abs;
      eigen_assert(m_isInitialized && "LU is not initialized.");
      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
      Index result = 0;
      for(Index i = 0; i < m_nonzero_pivots; ++i)
        result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);
      return result;
    }

    /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      */
    inline Index dimensionOfKernel() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return cols() - rank();
    }

    /** \returns true if the matrix of which *this is the LU decomposition represents an injective
      *          linear map, i.e. has trivial kernel; false otherwise.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      */
    inline bool isInjective() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return rank() == cols();
    }

    /** \returns true if the matrix of which *this is the LU decomposition represents a surjective
      *          linear map; false otherwise.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      */
    inline bool isSurjective() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return rank() == rows();
    }

    /** \returns true if the matrix of which *this is the LU decomposition is invertible.
      *
      * \note This method has to determine which pivots should be considered nonzero.
      *       For that, it uses the threshold value that you can control by calling
      *       setThreshold(const RealScalar&).
      */
    inline bool isInvertible() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      return isInjective() && (m_lu.rows() == m_lu.cols());
    }

    /** \returns the inverse of the matrix of which *this is the LU decomposition.
      *
      * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
      *       Use isInvertible() to first determine whether this matrix is invertible.
      *
      * \sa MatrixBase::inverse()
      */
    inline const internal::solve_retval<FullPivLU,typename MatrixType::IdentityReturnType> inverse() const
    {
      eigen_assert(m_isInitialized && "LU is not initialized.");
      eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!");
      return internal::solve_retval<FullPivLU,typename MatrixType::IdentityReturnType>
               (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols()));
    }

    MatrixType reconstructedMatrix() const;

    inline Index rows() const { return m_lu.rows(); }
    inline Index cols() const { return m_lu.cols(); }

  protected:

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
    }

    MatrixType m_lu;
    PermutationPType m_p;
    PermutationQType m_q;
    IntColVectorType m_rowsTranspositions;
    IntRowVectorType m_colsTranspositions;
    Index m_det_pq, m_nonzero_pivots;
    RealScalar m_maxpivot, m_prescribedThreshold;
    bool m_isInitialized, m_usePrescribedThreshold;
};

template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU()
  : m_isInitialized(false), m_usePrescribedThreshold(false)
{
}

template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
  : m_lu(rows, cols),
    m_p(rows),
    m_q(cols),
    m_rowsTranspositions(rows),
    m_colsTranspositions(cols),
    m_isInitialized(false),
    m_usePrescribedThreshold(false)
{
}

template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
  : m_lu(matrix.rows(), matrix.cols()),
    m_p(matrix.rows()),
    m_q(matrix.cols()),
    m_rowsTranspositions(matrix.rows()),
    m_colsTranspositions(matrix.cols()),
    m_isInitialized(false),
    m_usePrescribedThreshold(false)
{
  compute(matrix);
}

template<typename MatrixType>
FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
{
  check_template_parameters();

  // the permutations are stored as int indices, so just to be sure:
  eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());

  m_isInitialized = true;
  m_lu = matrix;

  const Index size = matrix.diagonalSize();
  const Index rows = matrix.rows();
  const Index cols = matrix.cols();
  // will store the transpositions, before we accumulate them at the end.
  // can't accumulate on-the-fly because that will be done in reverse order for the rows.
  m_rowsTranspositions.resize(matrix.rows());
  m_colsTranspositions.resize(matrix.cols());
  Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i

  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
  m_maxpivot = RealScalar(0);

  for(Index k = 0; k < size; ++k)
  {
    // First, we need to find the pivot.

    // biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
    Index row_of_biggest_in_corner, col_of_biggest_in_corner;
    RealScalar biggest_in_corner;
    biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
                        .cwiseAbs()
                        .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
    row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,
    col_of_biggest_in_corner += k; // need to add k to them.

    if(biggest_in_corner==RealScalar(0))
    {
      // before exiting, make sure to initialize the still uninitialized transpositions
      // in a sane state without destroying what we already have.
      m_nonzero_pivots = k;
      for(Index i = k; i < size; ++i)
      {
        m_rowsTranspositions.coeffRef(i) = i;
        m_colsTranspositions.coeffRef(i) = i;
      }
      break;
    }

    if(biggest_in_corner > m_maxpivot) m_maxpivot = biggest_in_corner;

    // Now that we've found the pivot, we need to apply the row/col swaps to
    // bring it to the location (k,k).

    m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
    m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
    if(k != row_of_biggest_in_corner) {
      m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
      ++number_of_transpositions;
    }
    if(k != col_of_biggest_in_corner) {
      m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
      ++number_of_transpositions;
    }

    // Now that the pivot is at the right location, we update the remaining
    // bottom-right corner by Gaussian elimination.

    if(k<rows-1)
      m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);
    if(k<size-1)
      m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);
  }

  // the main loop is over, we still have to accumulate the transpositions to find the
  // permutations P and Q

  m_p.setIdentity(rows);
  for(Index k = size-1; k >= 0; --k)
    m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));

  m_q.setIdentity(cols);
  for(Index k = 0; k < size; ++k)
    m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));

  m_det_pq = (number_of_transpositions%2) ? -1 : 1;
  return *this;
}

template<typename MatrixType>
typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() const
{
  eigen_assert(m_isInitialized && "LU is not initialized.");
  eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!");
  return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());
}

/** \returns the matrix represented by the decomposition,
  * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
  * This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
  eigen_assert(m_isInitialized && "LU is not initialized.");
  const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
  // LU
  MatrixType res(m_lu.rows(),m_lu.cols());
  // FIXME the .toDenseMatrix() should not be needed...
  res = m_lu.leftCols(smalldim)
            .template triangularView<UnitLower>().toDenseMatrix()
        * m_lu.topRows(smalldim)
              .template triangularView<Upper>().toDenseMatrix();

  // P^{-1}(LU)
  res = m_p.inverse() * res;

  // (P^{-1}LU)Q^{-1}
  res = res * m_q.inverse();

  return res;
}

/********* Implementation of kernel() **************************************************/

namespace internal {
template<typename _MatrixType>
struct kernel_retval<FullPivLU<_MatrixType> >
  : kernel_retval_base<FullPivLU<_MatrixType> >
{
  EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)

  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
            MatrixType::MaxColsAtCompileTime,
            MatrixType::MaxRowsAtCompileTime)
  };

  template<typename Dest> void evalTo(Dest& dst) const
  {
    using std::abs;
    const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
    if(dimker == 0)
    {
      // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
      // avoid crashing/asserting as that depends on floating point calculations. Let's
      // just return a single column vector filled with zeros.
      dst.setZero();
      return;
    }

    /* Let us use the following lemma:
      *
      * Lemma: If the matrix A has the LU decomposition PAQ = LU,
      * then Ker A = Q(Ker U).
      *
      * Proof: trivial: just keep in mind that P, Q, L are invertible.
      */

    /* Thus, all we need to do is to compute Ker U, and then apply Q.
      *
      * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end.
      * Thus, the diagonal of U ends with exactly
      * dimKer zeros. Let us use that to construct dimKer linearly
      * independent vectors in Ker U.
      */

    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
    Index p = 0;
    for(Index i = 0; i < dec().nonzeroPivots(); ++i)
      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
        pivots.coeffRef(p++) = i;
    eigen_internal_assert(p == rank());

    // we construct a temporary trapezoid matrix m, by taking the U matrix and
    // permuting the rows and cols to bring the nonnegligible pivots to the top of
    // the main diagonal. We need that to be able to apply our triangular solvers.
    // FIXME when we get triangularView-for-rectangular-matrices, this can be simplified
    Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
           MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
      m(dec().matrixLU().block(0, 0, rank(), cols));
    for(Index i = 0; i < rank(); ++i)
    {
      if(i) m.row(i).head(i).setZero();
      m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
    }
    m.block(0, 0, rank(), rank());
    m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
    for(Index i = 0; i < rank(); ++i)
      m.col(i).swap(m.col(pivots.coeff(i)));

    // ok, we have our trapezoid matrix, we can apply the triangular solver.
    // notice that the math behind this suggests that we should apply this to the
    // negative of the RHS, but for performance we just put the negative sign elsewhere, see below.
    m.topLeftCorner(rank(), rank())
     .template triangularView<Upper>().solveInPlace(
        m.topRightCorner(rank(), dimker)
      );

    // now we must undo the column permutation that we had applied!
    for(Index i = rank()-1; i >= 0; --i)
      m.col(i).swap(m.col(pivots.coeff(i)));

    // see the negative sign in the next line, that's what we were talking about above.
    for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
    for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
    for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
  }
};

/***** Implementation of image() *****************************************************/

template<typename _MatrixType>
struct image_retval<FullPivLU<_MatrixType> >
  : image_retval_base<FullPivLU<_MatrixType> >
{
  EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)

  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
            MatrixType::MaxColsAtCompileTime,
            MatrixType::MaxRowsAtCompileTime)
  };

  template<typename Dest> void evalTo(Dest& dst) const
  {
    using std::abs;
    if(rank() == 0)
    {
      // The Image is just {0}, so it doesn't have a basis properly speaking, but let's
      // avoid crashing/asserting as that depends on floating point calculations. Let's
      // just return a single column vector filled with zeros.
      dst.setZero();
      return;
    }

    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
    Index p = 0;
    for(Index i = 0; i < dec().nonzeroPivots(); ++i)
      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
        pivots.coeffRef(p++) = i;
    eigen_internal_assert(p == rank());

    for(Index i = 0; i < rank(); ++i)
      dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
  }
};

/***** Implementation of solve() *****************************************************/

template<typename _MatrixType, typename Rhs>
struct solve_retval<FullPivLU<_MatrixType>, Rhs>
  : solve_retval_base<FullPivLU<_MatrixType>, Rhs>
{
  EIGEN_MAKE_SOLVE_HELPERS(FullPivLU<_MatrixType>,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
     * So we proceed as follows:
     * Step 1: compute c = P * rhs.
     * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
     * Step 3: replace c by the solution x to Ux = c. May or may not exist.
     * Step 4: result = Q * c;
     */

    const Index rows = dec().rows(), cols = dec().cols(),
                nonzero_pivots = dec().rank();
    eigen_assert(rhs().rows() == rows);
    const Index smalldim = (std::min)(rows, cols);

    if(nonzero_pivots == 0)
    {
      dst.setZero();
      return;
    }

    typename Rhs::PlainObject c(rhs().rows(), rhs().cols());

    // Step 1
    c = dec().permutationP() * rhs();

    // Step 2
    dec().matrixLU()
       .topLeftCorner(smalldim,smalldim)
       .template triangularView<UnitLower>()
       .solveInPlace(c.topRows(smalldim));
    if(rows>cols)
    {
      c.bottomRows(rows-cols)
        -= dec().matrixLU().bottomRows(rows-cols)
         * c.topRows(cols);
    }

    // Step 3
    dec().matrixLU()
       .topLeftCorner(nonzero_pivots, nonzero_pivots)
       .template triangularView<Upper>()
       .solveInPlace(c.topRows(nonzero_pivots));

    // Step 4
    for(Index i = 0; i < nonzero_pivots; ++i)
      dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i);
    for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i)
      dst.row(dec().permutationQ().indices().coeff(i)).setZero();
  }
};

} // end namespace internal

/******* MatrixBase methods *****************************************************************/

/** \lu_module
  *
  * \return the full-pivoting LU decomposition of \c *this.
  *
  * \sa class FullPivLU
  */
template<typename Derived>
inline const FullPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::fullPivLu() const
{
  return FullPivLU<PlainObject>(eval());
}

} // end namespace Eigen

#endif // EIGEN_LU_H
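The header listing ends here. As a quick orientation, the following standalone snippet is a minimal usage sketch of the entry points documented above (rank(), isInvertible(), kernel(), solve(), reconstructedMatrix()). It is not part of FullPivLU.h or of Eigen itself; the file name and matrix values are made up for illustration, and it assumes the Eigen headers are on the include path.

// usage_sketch.cpp -- illustrative example only; NOT part of Eigen or FullPivLU.h.
// Build (assuming Eigen is available), e.g.:  g++ -I/path/to/eigen usage_sketch.cpp -o usage_sketch
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // A rank-deficient 3x3 matrix: the third row is the sum of the first two.
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       4, 5, 6,
       5, 7, 9;

  // Full-pivoting LU decomposition of A (equivalently: A.fullPivLu()).
  Eigen::FullPivLU<Eigen::Matrix3d> lu(A);

  std::cout << "rank:       " << lu.rank() << "\n";          // expected: 2
  std::cout << "invertible: " << lu.isInvertible() << "\n";  // expected: 0 (false)

  // The columns of kernel() span the null-space of A.
  Eigen::MatrixXd ker = lu.kernel();
  std::cout << "kernel basis:\n" << ker << "\n";

  // Solve A*x = b for a right-hand side chosen to lie in the column space of A.
  Eigen::Vector3d b(1, 1, 2);
  Eigen::Vector3d x = lu.solve(b);
  std::cout << "solve residual:       " << (A * x - b).norm() << "\n";

  // reconstructedMatrix() rebuilds P^{-1} L U Q^{-1}, which should equal A.
  std::cout << "reconstruction error: " << (lu.reconstructedMatrix() - A).norm() << "\n";
  return 0;
}

Both printed error norms should be on the order of machine epsilon. A right-hand side outside the column space of A would instead produce a large solve residual, which is why the solve() documentation above recommends checking the solution.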