Eigen Matrix Class Library
Memory.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


/*****************************************************************************
*** Platform checks for aligned malloc functions                          ***
*****************************************************************************/

#ifndef EIGEN_MEMORY_H
#define EIGEN_MEMORY_H

#ifndef EIGEN_MALLOC_ALREADY_ALIGNED

// Try to determine automatically if malloc is already aligned.

// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
// This is true at least since glibc 2.8.
// This leaves the question how to detect 64-bit. According to this document,
//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
 && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ )
  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
#endif

// FreeBSD 6 seems to have 16-byte aligned malloc
//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
#endif

#if defined(__APPLE__) \
 || defined(_WIN64) \
 || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
 || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
  #define EIGEN_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_MALLOC_ALREADY_ALIGNED 0
#endif

#endif
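// A quick way to sanity-check the alignment assumption encoded above on a
// given target (an illustrative, standalone sketch; not part of this header):
//
//   #include <cstdio>
//   #include <cstdlib>
//   int main() {
//     void* p = std::malloc(64);
//     std::printf("16-byte aligned: %s\n",
//                 (reinterpret_cast<std::size_t>(p) & 15) == 0 ? "yes" : "no");
//     std::free(p);
//   }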
// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
  #include <unistd.h>
  #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
    #define EIGEN_HAS_POSIX_MEMALIGN 1
  #endif
#endif

#ifndef EIGEN_HAS_POSIX_MEMALIGN
  #define EIGEN_HAS_POSIX_MEMALIGN 0
#endif

#ifdef EIGEN_VECTORIZE_SSE
  #define EIGEN_HAS_MM_MALLOC 1
#else
  #define EIGEN_HAS_MM_MALLOC 0
#endif

namespace Eigen {

namespace internal {

inline void throw_std_bad_alloc()
{
  #ifdef EIGEN_EXCEPTIONS
    throw std::bad_alloc();
  #else
    std::size_t huge = -1;
    new int[huge];
  #endif
}

/*****************************************************************************
*** Implementation of handmade aligned functions                          ***
*****************************************************************************/

/* ----- Hand made implementations of aligned malloc/free and realloc ----- */

/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
  * Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
  */
inline void* handmade_aligned_malloc(std::size_t size)
{
  void *original = std::malloc(size+16);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}

/** \internal Frees memory allocated with handmade_aligned_malloc */
inline void handmade_aligned_free(void *ptr)
{
  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}

/** \internal
  * \brief Reallocates aligned memory.
  * Since we know that our handmade version is based on std::realloc
  * we can use std::realloc to implement efficient reallocation.
  */
inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
{
  if (ptr == 0) return handmade_aligned_malloc(size);
  void *original = *(reinterpret_cast<void**>(ptr) - 1);
  std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
  original = std::realloc(original,size+16);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
  void *previous_aligned = static_cast<char *>(original)+previous_offset;
  if(aligned!=previous_aligned)
    std::memmove(aligned, previous_aligned, size);

  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}
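// How the handmade scheme above lays out memory (illustrative sketch):
//
//   original                        aligned (returned to the caller)
//   |                               |
//   v                               v
//   +---- 1..16 padding bytes ------+---------- size bytes ----------+
//   |      ...     [original]       |          user data             |
//   +-------------------------------+--------------------------------+
//
// The void* stored just before `aligned` records `original`, which is how
// handmade_aligned_free() and handmade_aligned_realloc() recover the pointer
// that malloc actually returned. Since malloc guarantees at least
// pointer-sized alignment in practice, the padding always has room for it.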
/*****************************************************************************
*** Implementation of generic aligned realloc (when no realloc can be used)***
*****************************************************************************/

void* aligned_malloc(std::size_t size);
void aligned_free(void *ptr);

/** \internal
  * \brief Reallocates aligned memory.
  * Allows reallocation with aligned ptr types. This implementation will
  * always create a new memory chunk and copy the old data.
  */
inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
{
  if (ptr==0)
    return aligned_malloc(size);

  if (size==0)
  {
    aligned_free(ptr);
    return 0;
  }

  void* newptr = aligned_malloc(size);
  if (newptr == 0)
  {
    #ifdef EIGEN_HAS_ERRNO
    errno = ENOMEM; // according to the standard
    #endif
    return 0;
  }

  if (ptr != 0)
  {
    std::memcpy(newptr, ptr, (std::min)(size,old_size));
    aligned_free(ptr);
  }

  return newptr;
}

/*****************************************************************************
*** Implementation of portable aligned versions of malloc/free/realloc   ***
*****************************************************************************/

#ifdef EIGEN_NO_MALLOC
inline void check_that_malloc_is_allowed()
{
  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
}
#elif defined EIGEN_RUNTIME_NO_MALLOC
inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
{
  static bool value = true;
  if (update == 1)
    value = new_value;
  return value;
}
inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
inline void check_that_malloc_is_allowed()
{
  eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
}
#else
inline void check_that_malloc_is_allowed()
{}
#endif

/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
  * On allocation error, the returned pointer is null, and std::bad_alloc is thrown.
  */
inline void* aligned_malloc(size_t size)
{
  check_that_malloc_is_allowed();

  void *result;
  #if !EIGEN_ALIGN
    result = std::malloc(size);
  #elif EIGEN_MALLOC_ALREADY_ALIGNED
    result = std::malloc(size);
  #elif EIGEN_HAS_POSIX_MEMALIGN
    if(posix_memalign(&result, 16, size)) result = 0;
  #elif EIGEN_HAS_MM_MALLOC
    result = _mm_malloc(size, 16);
  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
    result = _aligned_malloc(size, 16);
  #else
    result = handmade_aligned_malloc(size);
  #endif

  if(!result && size)
    throw_std_bad_alloc();

  return result;
}

/** \internal Frees memory allocated with aligned_malloc. */
inline void aligned_free(void *ptr)
{
  #if !EIGEN_ALIGN
    std::free(ptr);
  #elif EIGEN_MALLOC_ALREADY_ALIGNED
    std::free(ptr);
  #elif EIGEN_HAS_POSIX_MEMALIGN
    std::free(ptr);
  #elif EIGEN_HAS_MM_MALLOC
    _mm_free(ptr);
  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
    _aligned_free(ptr);
  #else
    handmade_aligned_free(ptr);
  #endif
}
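// Typical use of the portable pair above (a minimal illustrative sketch):
//
//   float* buf = static_cast<float*>(Eigen::internal::aligned_malloc(25 * sizeof(float)));
//   // buf is 16-byte aligned regardless of which backend was selected;
//   // fill buf[0]..buf[24], then release it with the matching deallocator:
//   Eigen::internal::aligned_free(buf);
//
// Mixing backends (e.g. freeing an aligned_malloc'd block with std::free)
// would be undefined, which is why both sides go through the same #if chain.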
/**
  * \internal
  * \brief Reallocates an aligned block of memory.
  * \throws std::bad_alloc on allocation failure
  **/
inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
{
  EIGEN_UNUSED_VARIABLE(old_size);

  void *result;
#if !EIGEN_ALIGN
  result = std::realloc(ptr,new_size);
#elif EIGEN_MALLOC_ALREADY_ALIGNED
  result = std::realloc(ptr,new_size);
#elif EIGEN_HAS_POSIX_MEMALIGN
  result = generic_aligned_realloc(ptr,new_size,old_size);
#elif EIGEN_HAS_MM_MALLOC
  // The defined(_mm_free) is just here to verify that this MSVC version
  // implements _mm_malloc/_mm_free based on the corresponding _aligned_
  // functions. This may not always be the case and we just try to be safe.
  #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free)
    result = _aligned_realloc(ptr,new_size,16);
  #else
    result = generic_aligned_realloc(ptr,new_size,old_size);
  #endif
#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
  result = _aligned_realloc(ptr,new_size,16);
#else
  result = handmade_aligned_realloc(ptr,new_size,old_size);
#endif

  if (!result && new_size)
    throw_std_bad_alloc();

  return result;
}

/*****************************************************************************
*** Implementation of conditionally aligned functions                     ***
*****************************************************************************/

/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
  * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.
  */
template<bool Align> inline void* conditional_aligned_malloc(size_t size)
{
  return aligned_malloc(size);
}

template<> inline void* conditional_aligned_malloc<false>(size_t size)
{
  check_that_malloc_is_allowed();

  void *result = std::malloc(size);
  if(!result && size)
    throw_std_bad_alloc();
  return result;
}

/** \internal Frees memory allocated with conditional_aligned_malloc */
template<bool Align> inline void conditional_aligned_free(void *ptr)
{
  aligned_free(ptr);
}

template<> inline void conditional_aligned_free<false>(void *ptr)
{
  std::free(ptr);
}

template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
{
  return aligned_realloc(ptr, new_size, old_size);
}

template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
{
  return std::realloc(ptr, new_size);
}

/*****************************************************************************
*** Construction/destruction of array elements                            ***
*****************************************************************************/

/** \internal Constructs the elements of an array.
  * The \a size parameter tells on how many objects to call the constructor of T.
  */
template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
{
  for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
  return ptr;
}
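// Note that "::new (ptr + i) T" is placement new: it constructs a T inside
// storage that was already allocated, without allocating anything itself.
// For instance (illustrative sketch, SomeType being a stand-in for any
// default-constructible type):
//
//   void* raw = aligned_malloc(3 * sizeof(SomeType));  // raw bytes only
//   SomeType* a = construct_elements_of_array(static_cast<SomeType*>(raw), 3);
//   // ... use a[0..2], then destruct the elements and free the storage ...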
/** \internal Destructs the elements of an array.
  * The \a size parameter tells on how many objects to call the destructor of T.
  */
template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
{
  // always destruct an array starting from the end.
  if(ptr)
    while(size) ptr[--size].~T();
}

/*****************************************************************************
*** Implementation of aligned new/delete-like functions                   ***
*****************************************************************************/

template<typename T>
EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
{
  if(size > size_t(-1) / sizeof(T))
    throw_std_bad_alloc();
}

/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
  * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown.
  * The default constructor of T is called.
  */
template<typename T> inline T* aligned_new(size_t size)
{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
  return construct_elements_of_array(result, size);
}

template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  return construct_elements_of_array(result, size);
}

/** \internal Deletes objects constructed with aligned_new
  * The \a size parameter tells on how many objects to call the destructor of T.
  */
template<typename T> inline void aligned_delete(T *ptr, size_t size)
{
  destruct_elements_of_array<T>(ptr, size);
  aligned_free(ptr);
}

/** \internal Deletes objects constructed with conditional_aligned_new
  * The \a size parameter tells on how many objects to call the destructor of T.
  */
template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size)
{
  destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}

template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(new_size < old_size)
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(new_size > old_size)
    construct_elements_of_array(result+old_size, new_size-old_size);
  return result;
}
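// The guard in check_size_for_overflow() rejects any element count whose byte
// size would wrap around: size_t(-1) is the largest representable size_t, so
// size > size_t(-1)/sizeof(T) holds exactly when sizeof(T)*size would
// overflow. For example, with sizeof(T) == 8 on a 64-bit target, any count
// above 2^61 - 1 throws before the multiplication is ever performed.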

template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
{
  if(size==0)
    return 0; // short-cut. Also fixes Bug 884
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  if(NumTraits<T>::RequireInitialization)
    construct_elements_of_array(result, size);
  return result;
}

template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
    construct_elements_of_array(result+old_size, new_size-old_size);
  return result;
}

template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size)
{
  if(NumTraits<T>::RequireInitialization)
    destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}
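// The *_auto variants use NumTraits<T>::RequireInitialization to distinguish
// raw arithmetic scalars (float, double, int, ...) from scalars with real
// constructors/destructors (e.g. std::complex<float>). For raw types the
// construct/destruct loops are skipped entirely, so e.g. (sketch)
// conditional_aligned_new_auto<double,true>(n) degenerates to a plain aligned
// allocation of n*sizeof(double) bytes with no per-element work.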
/****************************************************************************/

/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
  *
  * \param array the address of the start of the array
  * \param size the size of the array
  *
  * \note If no element of the array is well aligned, the size of the array is returned. Typically,
  * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
  * packet size for the given scalar type is 1, then everything is considered well-aligned.
  *
  * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
  * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
  * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
  * example with Scalar=double on certain 32-bit platforms, see bug #79.
  *
  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
  */
template<typename Scalar, typename Index>
static inline Index first_aligned(const Scalar* array, Index size)
{
  static const Index PacketSize = packet_traits<Scalar>::size;
  static const Index PacketAlignedMask = PacketSize-1;

  if(PacketSize==1)
  {
    // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
    // of the array have the same alignment.
    return 0;
  }
  else if(size_t(array) & (sizeof(Scalar)-1))
  {
    // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
    // Consequently, no element of the array is well aligned.
    return size;
  }
  else
  {
    return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
                            & PacketAlignedMask, size);
  }
}
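// Worked example for first_aligned (illustrative): with SSE, float has a
// PacketSize of 4. For a float array starting at address 0x1008 (4-byte
// aligned but not 16-byte aligned), 0x1008/4 = 0x402, and
// (4 - (0x402 & 3)) & 3 = 2: elements 2, 3, 4, ... fall on 16-byte
// boundaries, so first_aligned returns 2 (capped by size).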
/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size
  */
template<typename Index>
inline static Index first_multiple(Index size, Index base)
{
  return ((size+base-1)/base)*base;
}

// std::copy is much slower than memcpy, so let's introduce a smart_copy which
// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
template<typename T, bool UseMemcpy> struct smart_copy_helper;

template<typename T> void smart_copy(const T* start, const T* end, T* target)
{
  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}

template<typename T> struct smart_copy_helper<T,true> {
  static inline void run(const T* start, const T* end, T* target)
  { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); }
};

template<typename T> struct smart_copy_helper<T,false> {
  static inline void run(const T* start, const T* end, T* target)
  { std::copy(start, end, target); }
};


/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc)   ***
*****************************************************************************/

// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
#ifndef EIGEN_ALLOCA
  #if (defined __linux__) || (defined __APPLE__) || (defined alloca)
    #define EIGEN_ALLOCA alloca
  #elif defined(_MSC_VER)
    #define EIGEN_ALLOCA _alloca
  #endif
#endif

// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
template<typename T> class aligned_stack_memory_handler
{
  public:
    /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
     * Note that \a ptr can be 0 regardless of the other parameters.
     * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
     * In this case, the buffer elements will also be destructed when this handler is destructed.
     * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
     **/
    aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
      : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
    {
      if(NumTraits<T>::RequireInitialization && m_ptr)
        Eigen::internal::construct_elements_of_array(m_ptr, size);
    }
    ~aligned_stack_memory_handler()
    {
      if(NumTraits<T>::RequireInitialization && m_ptr)
        Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
      if(m_deallocate)
        Eigen::internal::aligned_free(m_ptr);
    }
  protected:
    T* m_ptr;
    size_t m_size;
    bool m_deallocate;
};

} // end namespace internal

/** \internal
  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
  * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
  * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
  * The allocated buffer is automatically deleted when exiting the scope of this declaration.
  * If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
  * Here is an example:
  * \code
  * {
  *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);
  *   // use data[0] to data[size-1]
  * }
  * \endcode
  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
  */
#ifdef EIGEN_ALLOCA

  #if defined(__arm__) || defined(_WIN32)
    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
  #else
    #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
  #endif

  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
               : reinterpret_cast<TYPE*>( \
                      (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
                    : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)

#else

  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)

#endif
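// Roughly what the alloca-based macro expands to for TYPE=float, SIZE=n,
// BUFFER=0 (an illustrative sketch with the casts simplified):
//
//   Eigen::internal::check_size_for_overflow<float>(n);
//   float* data = (sizeof(float)*n <= EIGEN_STACK_ALLOCATION_LIMIT)
//                   ? (float*)EIGEN_ALIGNED_ALLOCA(sizeof(float)*n)              // stack
//                   : (float*)Eigen::internal::aligned_malloc(sizeof(float)*n);  // heap
//   Eigen::internal::aligned_stack_memory_handler<float>
//       data_stack_memory_destructor(data, n, sizeof(float)*n > EIGEN_STACK_ALLOCATION_LIMIT);
//
// The guard object destructs the elements (if T requires it) and frees the
// heap block, if one was used, when the enclosing scope exits.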

/*****************************************************************************
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]               ***
*****************************************************************************/

#if EIGEN_ALIGN
  #ifdef EIGEN_EXCEPTIONS
    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void* operator new(size_t size, const std::nothrow_t&) throw() { \
        try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
        catch (...) { return 0; } \
      }
  #else
    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void* operator new(size_t size, const std::nothrow_t&) throw() { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      }
  #endif

  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
      void *operator new(size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void *operator new[](size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      /* in-place new and delete. since (at least afaik) there is no actual   */ \
      /* memory allocated we can safely let the default implementation handle */ \
      /* this particular case. */ \
      static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
      static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
      void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
      void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
      /* nothrow-new (returns zero instead of std::bad_alloc) */ \
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void operator delete(void *ptr, const std::nothrow_t&) throw() { \
        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
      } \
      typedef void eigen_aligned_operator_new_marker_type;
#else
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#endif

#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)))

/****************************************************************************/
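// Typical use (an illustrative sketch; the class name is hypothetical): a
// class holding a fixed-size vectorizable Eigen member pulls in the macro so
// that heap-allocated instances are 16-byte aligned:
//
//   struct RigidBodyState {
//     Eigen::Matrix4f transform;       // fixed-size, alignment-sensitive member
//     EIGEN_MAKE_ALIGNED_OPERATOR_NEW  // overloads new/delete as defined above
//   };
//
//   RigidBodyState* s = new RigidBodyState; // goes through conditional_aligned_malloc<true>
//   delete s;                               // matching conditional_aligned_free<true>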

/** \class aligned_allocator
  * \ingroup Core_Module
  *
  * \brief STL compatible allocator to use with 16 byte aligned types
  *
  * Example:
  * \code
  * // Matrix4f requires 16 bytes alignment:
  * std::map< int, Matrix4f, std::less<int>,
  *           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
  * // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
  * std::map< int, Vector3f > my_map_vec3;
  * \endcode
  *
  * \sa \ref TopicStlContainers.
  */
template<class T>
class aligned_allocator
{
public:
    typedef size_t          size_type;
    typedef std::ptrdiff_t  difference_type;
    typedef T*              pointer;
    typedef const T*        const_pointer;
    typedef T&              reference;
    typedef const T&        const_reference;
    typedef T               value_type;

    template<class U>
    struct rebind
    {
        typedef aligned_allocator<U> other;
    };

    pointer address( reference value ) const
    {
        return &value;
    }

    const_pointer address( const_reference value ) const
    {
        return &value;
    }

    aligned_allocator()
    {
    }

    aligned_allocator( const aligned_allocator& )
    {
    }

    template<class U>
    aligned_allocator( const aligned_allocator<U>& )
    {
    }

    ~aligned_allocator()
    {
    }

    size_type max_size() const
    {
        return (std::numeric_limits<size_type>::max)();
    }

    pointer allocate( size_type num, const void* hint = 0 )
    {
        EIGEN_UNUSED_VARIABLE(hint);
        internal::check_size_for_overflow<T>(num);
        return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
    }

    void construct( pointer p, const T& value )
    {
        ::new( p ) T( value );
    }

    void destroy( pointer p )
    {
        p->~T();
    }

    void deallocate( pointer p, size_type /*num*/ )
    {
        internal::aligned_free( p );
    }

    bool operator!=(const aligned_allocator<T>& ) const
    { return false; }

    bool operator==(const aligned_allocator<T>& ) const
    { return true; }
};
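// Another common pattern (illustrative sketch): std::vector's default
// allocator does not guarantee 16-byte alignment, so vectors of fixed-size
// vectorizable types take aligned_allocator explicitly:
//
//   std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > v(10);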

//---------- Cache sizes ----------

#if !defined(EIGEN_NO_CPUID)
#  if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
#    if defined(__PIC__) && defined(__i386__)
       // Case for x86 with PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
#    elif defined(__PIC__) && defined(__x86_64__)
       // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
       // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
#    else
       // Case for x86_64 or x86 w/o PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
#    endif
#  elif defined(_MSC_VER)
#    if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
#    endif
#  endif
#endif

namespace internal {

#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
  return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}

inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
{
  int abcd[4];
  l1 = l2 = l3 = 0;
  int cache_id = 0;
  int cache_type = 0;
  do {
    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
    EIGEN_CPUID(abcd,0x4,cache_id);
    cache_type = (abcd[0] & 0x0F) >> 0;
    if(cache_type==1||cache_type==3) // data or unified cache
    {
      int cache_level = (abcd[0] & 0xE0) >> 5;        // A[7:5]
      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]
      int sets        = (abcd[2]);                    // C[31:0]

      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);

      switch(cache_level)
      {
        case 1: l1 = cache_size; break;
        case 2: l2 = cache_size; break;
        case 3: l3 = cache_size; break;
        default: break;
      }
    }
    cache_id++;
  } while(cache_type>0 && cache_id<16);
}
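// Worked example for the leaf-0x4 formula above (illustrative): a typical
// 32 KB, 8-way L1 data cache with 64-byte lines and 64 sets is reported as
// ways=7, partitions=0, line_size=63, sets=63, and indeed
// (7+1) * (0+1) * (63+1) * (63+1) = 32768 bytes.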

inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  l1 = l2 = l3 = 0;
  EIGEN_CPUID(abcd,0x00000002,0);
  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
  bool check_for_p2_core2 = false;
  for(int i=0; i<14; ++i)
  {
    switch(bytes[i])
    {
      case 0x0A: l1 = 8; break;     // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines
      case 0x0C: l1 = 16; break;    // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines
      case 0x0E: l1 = 24; break;    // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines
      case 0x10: l1 = 16; break;    // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
      case 0x15: l1 = 16; break;    // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
      case 0x2C: l1 = 32; break;    // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines
      case 0x30: l1 = 32; break;    // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines
      case 0x60: l1 = 16; break;    // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
      case 0x66: l1 = 8; break;     // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
      case 0x67: l1 = 16; break;    // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
      case 0x68: l1 = 32; break;    // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
      case 0x1A: l2 = 96; break;    // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
      case 0x23: l3 = 1024; break;  // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x25: l3 = 2048; break;  // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x29: l3 = 4096; break;  // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
      case 0x40: l2 = 0; break;     // no integrated L2 cache (P6 core) or L3 cache (P4 core)
      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
      case 0x44: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
      case 0x45: l2 = 2048; break;  // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
      case 0x46: l3 = 4096; break;  // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
      case 0x47: l3 = 8192; break;  // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
      case 0x48: l2 = 3072; break;  // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break; // code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
      case 0x4A: l3 = 6144; break;  // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
      case 0x4B: l3 = 8192; break;  // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
      case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
      case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
      case 0x4E: l2 = 6144; break;  // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
      case 0x78: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7C: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7D: l2 = 2048; break;  // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
      case 0x84: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
      case 0x85: l2 = 2048; break;  // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
      case 0x87: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
      case 0x88: l3 = 2048; break;  // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
      case 0x89: l3 = 4096; break;  // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
      case 0x8A: l3 = 8192; break;  // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
      case 0x8D: l3 = 3072; break;  // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)

      default: break;
    }
  }
  if(check_for_p2_core2 && l2 == l3)
    l3 = 0;
  l1 *= 1024;
  l2 *= 1024;
  l3 *= 1024;
}

inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
{
  if(max_std_funcs>=4)
    queryCacheSizes_intel_direct(l1,l2,l3);
  else
    queryCacheSizes_intel_codes(l1,l2,l3);
}

inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000005,0);
  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000006,0);
  l2 = (abcd[2] >> 16) * 1024; // C[31;16] = l2 cache size in KB
  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31;18] = l3 cache size in 512KB
}
#endif
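// Worked example for the AMD path (illustrative, with a made-up register
// value): if leaf 0x80000006 returned ECX = 0x02004142, then
// ECX[31:16] = 0x0200 = 512, so l2 = 512 * 1024 bytes.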

/** \internal
  * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
  #ifdef EIGEN_CPUID
  int abcd[4];
  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
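  // (Sketch) Each constant above packs a 12-character CPUID vendor string as
  // three little-endian 32-bit words, in EBX, EDX, ECX order; e.g. for
  // "GenuineIntel": "Genu" = 0x756e6547, "ineI" = 0x49656e69, "ntel" = 0x6c65746e,
  // which is what cpuid_is_vendor() compares against abcd[1], abcd[3], abcd[2].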

  // identify the CPU vendor
  EIGEN_CPUID(abcd,0x0,0);
  int max_std_funcs = abcd[1];
  if(cpuid_is_vendor(abcd,GenuineIntel))
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
    queryCacheSizes_amd(l1,l2,l3);
  else
    // by default let's use Intel's API
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);

  // here is the list of other vendors:
  //   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
  //   ||cpuid_is_vendor(abcd,"CyrixInstead")
  //   ||cpuid_is_vendor(abcd,"CentaurHauls")
  //   ||cpuid_is_vendor(abcd,"GenuineTMx86")
  //   ||cpuid_is_vendor(abcd,"TransmetaCPU")
  //   ||cpuid_is_vendor(abcd,"RiseRiseRise")
  //   ||cpuid_is_vendor(abcd,"Geode by NSC")
  //   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
  //   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
  //   ||cpuid_is_vendor(abcd,"NexGenDriven")
  #else
  l1 = l2 = l3 = -1;
  #endif
}

/** \internal
  * \returns the size in Bytes of the L1 data cache */
inline int queryL1CacheSize()
{
  int l1(-1), l2, l3;
  queryCacheSizes(l1,l2,l3);
  return l1;
}

/** \internal
  * \returns the size in Bytes of the L2 or L3 cache if the latter is present */
inline int queryTopLevelCacheSize()
{
  int l1, l2(-1), l3(-1);
  queryCacheSizes(l1,l2,l3);
  return (std::max)(l2,l3);
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_MEMORY_H