Eigen library for mbed
src/Core/util/Memory.h@1:3b8049da21b8, 2019-09-24 (annotated)
- Committer: jsoh91
- Date: Tue Sep 24 00:18:23 2019 +0000
- Revision: 1:3b8049da21b8
- Parent: 0:13a5d365ba16
- Commit message: "ignore and revise some of error parts"
Who changed what in which revision?
| User | Revision | Line number | New contents of line |
|---|---|---|---|
| ykuroda | 0:13a5d365ba16 | 1 | // This file is part of Eigen, a lightweight C++ template library |
| ykuroda | 0:13a5d365ba16 | 2 | // for linear algebra. |
| ykuroda | 0:13a5d365ba16 | 3 | // |
| ykuroda | 0:13a5d365ba16 | 4 | // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> |
| ykuroda | 0:13a5d365ba16 | 5 | // Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com> |
| ykuroda | 0:13a5d365ba16 | 6 | // Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com> |
| ykuroda | 0:13a5d365ba16 | 7 | // Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com> |
| ykuroda | 0:13a5d365ba16 | 8 | // Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org> |
| ykuroda | 0:13a5d365ba16 | 9 | // |
| ykuroda | 0:13a5d365ba16 | 10 | // This Source Code Form is subject to the terms of the Mozilla |
| ykuroda | 0:13a5d365ba16 | 11 | // Public License v. 2.0. If a copy of the MPL was not distributed |
| ykuroda | 0:13a5d365ba16 | 12 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. |
| ykuroda | 0:13a5d365ba16 | 13 | |
| ykuroda | 0:13a5d365ba16 | 14 | |
| ykuroda | 0:13a5d365ba16 | 15 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 16 | *** Platform checks for aligned malloc functions *** |
| ykuroda | 0:13a5d365ba16 | 17 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 18 | |
| ykuroda | 0:13a5d365ba16 | 19 | #ifndef EIGEN_MEMORY_H |
| ykuroda | 0:13a5d365ba16 | 20 | #define EIGEN_MEMORY_H |
| ykuroda | 0:13a5d365ba16 | 21 | |
| ykuroda | 0:13a5d365ba16 | 22 | #ifndef EIGEN_MALLOC_ALREADY_ALIGNED |
| ykuroda | 0:13a5d365ba16 | 23 | |
| ykuroda | 0:13a5d365ba16 | 24 | // Try to determine automatically if malloc is already aligned. |
| ykuroda | 0:13a5d365ba16 | 25 | |
| ykuroda | 0:13a5d365ba16 | 26 | // On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see: |
| ykuroda | 0:13a5d365ba16 | 27 | // http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html |
| ykuroda | 0:13a5d365ba16 | 28 | // This is true at least since glibc 2.8. |
| ykuroda | 0:13a5d365ba16 | 29 | // This leaves the question how to detect 64-bit. According to this document, |
| ykuroda | 0:13a5d365ba16 | 30 | // http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf |
| ykuroda | 0:13a5d365ba16 | 31 | // page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed |
| ykuroda | 0:13a5d365ba16 | 32 | // quite safe, at least within the context of glibc, to equate 64-bit with LP64. |
| ykuroda | 0:13a5d365ba16 | 33 | #if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \ |
| ykuroda | 0:13a5d365ba16 | 34 | && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ ) |
| ykuroda | 0:13a5d365ba16 | 35 | #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1 |
| ykuroda | 0:13a5d365ba16 | 36 | #else |
| ykuroda | 0:13a5d365ba16 | 37 | #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0 |
| ykuroda | 0:13a5d365ba16 | 38 | #endif |
| ykuroda | 0:13a5d365ba16 | 39 | |
| ykuroda | 0:13a5d365ba16 | 40 | // FreeBSD 6 seems to have 16-byte aligned malloc |
| ykuroda | 0:13a5d365ba16 | 41 | // See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup |
| ykuroda | 0:13a5d365ba16 | 42 | // FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures |
| ykuroda | 0:13a5d365ba16 | 43 | // See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup |
| ykuroda | 0:13a5d365ba16 | 44 | #if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__) |
| ykuroda | 0:13a5d365ba16 | 45 | #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1 |
| ykuroda | 0:13a5d365ba16 | 46 | #else |
| ykuroda | 0:13a5d365ba16 | 47 | #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0 |
| ykuroda | 0:13a5d365ba16 | 48 | #endif |
| ykuroda | 0:13a5d365ba16 | 49 | |
| ykuroda | 0:13a5d365ba16 | 50 | #if defined(__APPLE__) \ |
| ykuroda | 0:13a5d365ba16 | 51 | || defined(_WIN64) \ |
| ykuroda | 0:13a5d365ba16 | 52 | || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \ |
| ykuroda | 0:13a5d365ba16 | 53 | || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED |
| ykuroda | 0:13a5d365ba16 | 54 | #define EIGEN_MALLOC_ALREADY_ALIGNED 1 |
| ykuroda | 0:13a5d365ba16 | 55 | #else |
| ykuroda | 0:13a5d365ba16 | 56 | #define EIGEN_MALLOC_ALREADY_ALIGNED 0 |
| ykuroda | 0:13a5d365ba16 | 57 | #endif |
| ykuroda | 0:13a5d365ba16 | 58 | |
| ykuroda | 0:13a5d365ba16 | 59 | #endif |
| ykuroda | 0:13a5d365ba16 | 60 | |
| ykuroda | 0:13a5d365ba16 | 61 | // See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554) |
| ykuroda | 0:13a5d365ba16 | 62 | // It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first. |
| ykuroda | 0:13a5d365ba16 | 63 | // Currently, let's include it only on unix systems: |
| ykuroda | 0:13a5d365ba16 | 64 | #if defined(__unix__) || defined(__unix) |
| ykuroda | 0:13a5d365ba16 | 65 | #include <unistd.h> |
| ykuroda | 0:13a5d365ba16 | 66 | #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0) |
| ykuroda | 0:13a5d365ba16 | 67 | #define EIGEN_HAS_POSIX_MEMALIGN 1 |
| ykuroda | 0:13a5d365ba16 | 68 | #endif |
| ykuroda | 0:13a5d365ba16 | 69 | #endif |
| ykuroda | 0:13a5d365ba16 | 70 | |
| ykuroda | 0:13a5d365ba16 | 71 | #ifndef EIGEN_HAS_POSIX_MEMALIGN |
| ykuroda | 0:13a5d365ba16 | 72 | #define EIGEN_HAS_POSIX_MEMALIGN 0 |
| ykuroda | 0:13a5d365ba16 | 73 | #endif |
| ykuroda | 0:13a5d365ba16 | 74 | |
| ykuroda | 0:13a5d365ba16 | 75 | #ifdef EIGEN_VECTORIZE_SSE |
| ykuroda | 0:13a5d365ba16 | 76 | #define EIGEN_HAS_MM_MALLOC 1 |
| ykuroda | 0:13a5d365ba16 | 77 | #else |
| ykuroda | 0:13a5d365ba16 | 78 | #define EIGEN_HAS_MM_MALLOC 0 |
| ykuroda | 0:13a5d365ba16 | 79 | #endif |
| ykuroda | 0:13a5d365ba16 | 80 | |
| ykuroda | 0:13a5d365ba16 | 81 | namespace Eigen { |
| ykuroda | 0:13a5d365ba16 | 82 | |
| ykuroda | 0:13a5d365ba16 | 83 | namespace internal { |
| ykuroda | 0:13a5d365ba16 | 84 | |
| ykuroda | 0:13a5d365ba16 | 85 | inline void throw_std_bad_alloc() |
| ykuroda | 0:13a5d365ba16 | 86 | { |
| ykuroda | 0:13a5d365ba16 | 87 | #ifdef EIGEN_EXCEPTIONS |
| ykuroda | 0:13a5d365ba16 | 88 | throw std::bad_alloc(); |
| ykuroda | 0:13a5d365ba16 | 89 | #else |
| jsoh91 | 1:3b8049da21b8 | 90 | // std::size_t huge = -1; |
| jsoh91 | 1:3b8049da21b8 | 91 | // new int[huge]; |
| ykuroda | 0:13a5d365ba16 | 92 | #endif |
| ykuroda | 0:13a5d365ba16 | 93 | } |
| ykuroda | 0:13a5d365ba16 | 94 | |
| ykuroda | 0:13a5d365ba16 | 95 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 96 | *** Implementation of handmade aligned functions *** |
| ykuroda | 0:13a5d365ba16 | 97 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 98 | |
| ykuroda | 0:13a5d365ba16 | 99 | /* ----- Hand made implementations of aligned malloc/free and realloc ----- */ |
| ykuroda | 0:13a5d365ba16 | 100 | |
| ykuroda | 0:13a5d365ba16 | 101 | /** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned. |
| ykuroda | 0:13a5d365ba16 | 102 | * Fast, but wastes 16 additional bytes of memory. Does not throw any exception. |
| ykuroda | 0:13a5d365ba16 | 103 | */ |
| ykuroda | 0:13a5d365ba16 | 104 | inline void* handmade_aligned_malloc(std::size_t size) |
| ykuroda | 0:13a5d365ba16 | 105 | { |
| ykuroda | 0:13a5d365ba16 | 106 | void *original = std::malloc(size+16); |
| ykuroda | 0:13a5d365ba16 | 107 | if (original == 0) return 0; |
| ykuroda | 0:13a5d365ba16 | 108 | void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16); |
| ykuroda | 0:13a5d365ba16 | 109 | *(reinterpret_cast<void**>(aligned) - 1) = original; |
| ykuroda | 0:13a5d365ba16 | 110 | return aligned; |
| ykuroda | 0:13a5d365ba16 | 111 | } |
| ykuroda | 0:13a5d365ba16 | 112 | |
| ykuroda | 0:13a5d365ba16 | 113 | /** \internal Frees memory allocated with handmade_aligned_malloc */ |
| ykuroda | 0:13a5d365ba16 | 114 | inline void handmade_aligned_free(void *ptr) |
| ykuroda | 0:13a5d365ba16 | 115 | { |
| ykuroda | 0:13a5d365ba16 | 116 | if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1)); |
| ykuroda | 0:13a5d365ba16 | 117 | } |
| ykuroda | 0:13a5d365ba16 | 118 | |
| ykuroda | 0:13a5d365ba16 | 119 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 120 | * \brief Reallocates aligned memory. |
| ykuroda | 0:13a5d365ba16 | 121 | * Since we know that our handmade version is based on std::realloc |
| ykuroda | 0:13a5d365ba16 | 122 | * we can use std::realloc to implement efficient reallocation. |
| ykuroda | 0:13a5d365ba16 | 123 | */ |
| ykuroda | 0:13a5d365ba16 | 124 | inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0) |
| ykuroda | 0:13a5d365ba16 | 125 | { |
| ykuroda | 0:13a5d365ba16 | 126 | if (ptr == 0) return handmade_aligned_malloc(size); |
| ykuroda | 0:13a5d365ba16 | 127 | void *original = *(reinterpret_cast<void**>(ptr) - 1); |
| ykuroda | 0:13a5d365ba16 | 128 | std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original); |
| ykuroda | 0:13a5d365ba16 | 129 | original = std::realloc(original,size+16); |
| ykuroda | 0:13a5d365ba16 | 130 | if (original == 0) return 0; |
| ykuroda | 0:13a5d365ba16 | 131 | void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16); |
| ykuroda | 0:13a5d365ba16 | 132 | void *previous_aligned = static_cast<char *>(original)+previous_offset; |
| ykuroda | 0:13a5d365ba16 | 133 | if(aligned!=previous_aligned) |
| ykuroda | 0:13a5d365ba16 | 134 | std::memmove(aligned, previous_aligned, size); |
| ykuroda | 0:13a5d365ba16 | 135 | |
| ykuroda | 0:13a5d365ba16 | 136 | *(reinterpret_cast<void**>(aligned) - 1) = original; |
| ykuroda | 0:13a5d365ba16 | 137 | return aligned; |
| ykuroda | 0:13a5d365ba16 | 138 | } |
| ykuroda | 0:13a5d365ba16 | 139 | |
| ykuroda | 0:13a5d365ba16 | 140 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 141 | *** Implementation of generic aligned realloc (when no realloc can be used)*** |
| ykuroda | 0:13a5d365ba16 | 142 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 143 | |
| ykuroda | 0:13a5d365ba16 | 144 | void* aligned_malloc(std::size_t size); |
| ykuroda | 0:13a5d365ba16 | 145 | void aligned_free(void *ptr); |
| ykuroda | 0:13a5d365ba16 | 146 | |
| ykuroda | 0:13a5d365ba16 | 147 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 148 | * \brief Reallocates aligned memory. |
| ykuroda | 0:13a5d365ba16 | 149 | * Allows reallocation with aligned ptr types. This implementation will |
| ykuroda | 0:13a5d365ba16 | 150 | * always create a new memory chunk and copy the old data. |
| ykuroda | 0:13a5d365ba16 | 151 | */ |
| ykuroda | 0:13a5d365ba16 | 152 | inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size) |
| ykuroda | 0:13a5d365ba16 | 153 | { |
| ykuroda | 0:13a5d365ba16 | 154 | if (ptr==0) |
| ykuroda | 0:13a5d365ba16 | 155 | return aligned_malloc(size); |
| ykuroda | 0:13a5d365ba16 | 156 | |
| ykuroda | 0:13a5d365ba16 | 157 | if (size==0) |
| ykuroda | 0:13a5d365ba16 | 158 | { |
| ykuroda | 0:13a5d365ba16 | 159 | aligned_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 160 | return 0; |
| ykuroda | 0:13a5d365ba16 | 161 | } |
| ykuroda | 0:13a5d365ba16 | 162 | |
| ykuroda | 0:13a5d365ba16 | 163 | void* newptr = aligned_malloc(size); |
| ykuroda | 0:13a5d365ba16 | 164 | if (newptr == 0) |
| ykuroda | 0:13a5d365ba16 | 165 | { |
| ykuroda | 0:13a5d365ba16 | 166 | #ifdef EIGEN_HAS_ERRNO |
| ykuroda | 0:13a5d365ba16 | 167 | errno = ENOMEM; // according to the standard |
| ykuroda | 0:13a5d365ba16 | 168 | #endif |
| ykuroda | 0:13a5d365ba16 | 169 | return 0; |
| ykuroda | 0:13a5d365ba16 | 170 | } |
| ykuroda | 0:13a5d365ba16 | 171 | |
| ykuroda | 0:13a5d365ba16 | 172 | if (ptr != 0) |
| ykuroda | 0:13a5d365ba16 | 173 | { |
| ykuroda | 0:13a5d365ba16 | 174 | std::memcpy(newptr, ptr, (std::min)(size,old_size)); |
| ykuroda | 0:13a5d365ba16 | 175 | aligned_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 176 | } |
| ykuroda | 0:13a5d365ba16 | 177 | |
| ykuroda | 0:13a5d365ba16 | 178 | return newptr; |
| ykuroda | 0:13a5d365ba16 | 179 | } |
| ykuroda | 0:13a5d365ba16 | 180 | |
| ykuroda | 0:13a5d365ba16 | 181 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 182 | *** Implementation of portable aligned versions of malloc/free/realloc *** |
| ykuroda | 0:13a5d365ba16 | 183 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 184 | |
| ykuroda | 0:13a5d365ba16 | 185 | #ifdef EIGEN_NO_MALLOC |
| ykuroda | 0:13a5d365ba16 | 186 | inline void check_that_malloc_is_allowed() |
| ykuroda | 0:13a5d365ba16 | 187 | { |
| ykuroda | 0:13a5d365ba16 | 188 | eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); |
| ykuroda | 0:13a5d365ba16 | 189 | } |
| ykuroda | 0:13a5d365ba16 | 190 | #elif defined EIGEN_RUNTIME_NO_MALLOC |
| ykuroda | 0:13a5d365ba16 | 191 | inline bool is_malloc_allowed_impl(bool update, bool new_value = false) |
| ykuroda | 0:13a5d365ba16 | 192 | { |
| ykuroda | 0:13a5d365ba16 | 193 | static bool value = true; |
| ykuroda | 0:13a5d365ba16 | 194 | if (update == 1) |
| ykuroda | 0:13a5d365ba16 | 195 | value = new_value; |
| ykuroda | 0:13a5d365ba16 | 196 | return value; |
| ykuroda | 0:13a5d365ba16 | 197 | } |
| ykuroda | 0:13a5d365ba16 | 198 | inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); } |
| ykuroda | 0:13a5d365ba16 | 199 | inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); } |
| ykuroda | 0:13a5d365ba16 | 200 | inline void check_that_malloc_is_allowed() |
| ykuroda | 0:13a5d365ba16 | 201 | { |
| ykuroda | 0:13a5d365ba16 | 202 | eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)"); |
| ykuroda | 0:13a5d365ba16 | 203 | } |
| ykuroda | 0:13a5d365ba16 | 204 | #else |
| ykuroda | 0:13a5d365ba16 | 205 | inline void check_that_malloc_is_allowed() |
| ykuroda | 0:13a5d365ba16 | 206 | {} |
| ykuroda | 0:13a5d365ba16 | 207 | #endif |
| ykuroda | 0:13a5d365ba16 | 208 | |
| ykuroda | 0:13a5d365ba16 | 209 | /** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment. |
| ykuroda | 0:13a5d365ba16 | 210 | * On allocation error, the returned pointer is null, and std::bad_alloc is thrown. |
| ykuroda | 0:13a5d365ba16 | 211 | */ |
| ykuroda | 0:13a5d365ba16 | 212 | inline void* aligned_malloc(size_t size) |
| ykuroda | 0:13a5d365ba16 | 213 | { |
| ykuroda | 0:13a5d365ba16 | 214 | check_that_malloc_is_allowed(); |
| ykuroda | 0:13a5d365ba16 | 215 | |
| ykuroda | 0:13a5d365ba16 | 216 | void *result; |
| ykuroda | 0:13a5d365ba16 | 217 | #if !EIGEN_ALIGN |
| ykuroda | 0:13a5d365ba16 | 218 | result = std::malloc(size); |
| ykuroda | 0:13a5d365ba16 | 219 | #elif EIGEN_MALLOC_ALREADY_ALIGNED |
| ykuroda | 0:13a5d365ba16 | 220 | result = std::malloc(size); |
| ykuroda | 0:13a5d365ba16 | 221 | #elif EIGEN_HAS_POSIX_MEMALIGN |
| ykuroda | 0:13a5d365ba16 | 222 | if(posix_memalign(&result, 16, size)) result = 0; |
| ykuroda | 0:13a5d365ba16 | 223 | #elif EIGEN_HAS_MM_MALLOC |
| ykuroda | 0:13a5d365ba16 | 224 | result = _mm_malloc(size, 16); |
| ykuroda | 0:13a5d365ba16 | 225 | #elif defined(_MSC_VER) && (!defined(_WIN32_WCE)) |
| ykuroda | 0:13a5d365ba16 | 226 | result = _aligned_malloc(size, 16); |
| ykuroda | 0:13a5d365ba16 | 227 | #else |
| ykuroda | 0:13a5d365ba16 | 228 | result = handmade_aligned_malloc(size); |
| ykuroda | 0:13a5d365ba16 | 229 | #endif |
| ykuroda | 0:13a5d365ba16 | 230 | |
| ykuroda | 0:13a5d365ba16 | 231 | if(!result && size) |
| ykuroda | 0:13a5d365ba16 | 232 | throw_std_bad_alloc(); |
| ykuroda | 0:13a5d365ba16 | 233 | |
| ykuroda | 0:13a5d365ba16 | 234 | return result; |
| ykuroda | 0:13a5d365ba16 | 235 | } |
| ykuroda | 0:13a5d365ba16 | 236 | |
| ykuroda | 0:13a5d365ba16 | 237 | /** \internal Frees memory allocated with aligned_malloc. */ |
| ykuroda | 0:13a5d365ba16 | 238 | inline void aligned_free(void *ptr) |
| ykuroda | 0:13a5d365ba16 | 239 | { |
| ykuroda | 0:13a5d365ba16 | 240 | #if !EIGEN_ALIGN |
| ykuroda | 0:13a5d365ba16 | 241 | std::free(ptr); |
| ykuroda | 0:13a5d365ba16 | 242 | #elif EIGEN_MALLOC_ALREADY_ALIGNED |
| ykuroda | 0:13a5d365ba16 | 243 | std::free(ptr); |
| ykuroda | 0:13a5d365ba16 | 244 | #elif EIGEN_HAS_POSIX_MEMALIGN |
| ykuroda | 0:13a5d365ba16 | 245 | std::free(ptr); |
| ykuroda | 0:13a5d365ba16 | 246 | #elif EIGEN_HAS_MM_MALLOC |
| ykuroda | 0:13a5d365ba16 | 247 | _mm_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 248 | #elif defined(_MSC_VER) && (!defined(_WIN32_WCE)) |
| ykuroda | 0:13a5d365ba16 | 249 | _aligned_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 250 | #else |
| ykuroda | 0:13a5d365ba16 | 251 | handmade_aligned_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 252 | #endif |
| ykuroda | 0:13a5d365ba16 | 253 | } |
| ykuroda | 0:13a5d365ba16 | 254 | |
| ykuroda | 0:13a5d365ba16 | 255 | /** |
| ykuroda | 0:13a5d365ba16 | 256 | * \internal |
| ykuroda | 0:13a5d365ba16 | 257 | * \brief Reallocates an aligned block of memory. |
| ykuroda | 0:13a5d365ba16 | 258 | * \throws std::bad_alloc on allocation failure |
| ykuroda | 0:13a5d365ba16 | 259 | **/ |
| ykuroda | 0:13a5d365ba16 | 260 | inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size) |
| ykuroda | 0:13a5d365ba16 | 261 | { |
| ykuroda | 0:13a5d365ba16 | 262 | EIGEN_UNUSED_VARIABLE(old_size); |
| ykuroda | 0:13a5d365ba16 | 263 | |
| ykuroda | 0:13a5d365ba16 | 264 | void *result; |
| ykuroda | 0:13a5d365ba16 | 265 | #if !EIGEN_ALIGN |
| ykuroda | 0:13a5d365ba16 | 266 | result = std::realloc(ptr,new_size); |
| ykuroda | 0:13a5d365ba16 | 267 | #elif EIGEN_MALLOC_ALREADY_ALIGNED |
| ykuroda | 0:13a5d365ba16 | 268 | result = std::realloc(ptr,new_size); |
| ykuroda | 0:13a5d365ba16 | 269 | #elif EIGEN_HAS_POSIX_MEMALIGN |
| ykuroda | 0:13a5d365ba16 | 270 | result = generic_aligned_realloc(ptr,new_size,old_size); |
| ykuroda | 0:13a5d365ba16 | 271 | #elif EIGEN_HAS_MM_MALLOC |
| ykuroda | 0:13a5d365ba16 | 272 | // The defined(_mm_free) is just here to verify that this MSVC version |
| ykuroda | 0:13a5d365ba16 | 273 | // implements _mm_malloc/_mm_free based on the corresponding _aligned_ |
| ykuroda | 0:13a5d365ba16 | 274 | // functions. This may not always be the case and we just try to be safe. |
| ykuroda | 0:13a5d365ba16 | 275 | #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free) |
| ykuroda | 0:13a5d365ba16 | 276 | result = _aligned_realloc(ptr,new_size,16); |
| ykuroda | 0:13a5d365ba16 | 277 | #else |
| ykuroda | 0:13a5d365ba16 | 278 | result = generic_aligned_realloc(ptr,new_size,old_size); |
| ykuroda | 0:13a5d365ba16 | 279 | #endif |
| ykuroda | 0:13a5d365ba16 | 280 | #elif defined(_MSC_VER) && (!defined(_WIN32_WCE)) |
| ykuroda | 0:13a5d365ba16 | 281 | result = _aligned_realloc(ptr,new_size,16); |
| ykuroda | 0:13a5d365ba16 | 282 | #else |
| ykuroda | 0:13a5d365ba16 | 283 | result = handmade_aligned_realloc(ptr,new_size,old_size); |
| ykuroda | 0:13a5d365ba16 | 284 | #endif |
| ykuroda | 0:13a5d365ba16 | 285 | |
| ykuroda | 0:13a5d365ba16 | 286 | if (!result && new_size) |
| ykuroda | 0:13a5d365ba16 | 287 | throw_std_bad_alloc(); |
| ykuroda | 0:13a5d365ba16 | 288 | |
| ykuroda | 0:13a5d365ba16 | 289 | return result; |
| ykuroda | 0:13a5d365ba16 | 290 | } |
| ykuroda | 0:13a5d365ba16 | 291 | |
| ykuroda | 0:13a5d365ba16 | 292 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 293 | *** Implementation of conditionally aligned functions *** |
| ykuroda | 0:13a5d365ba16 | 294 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 295 | |
| ykuroda | 0:13a5d365ba16 | 296 | /** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned. |
| ykuroda | 0:13a5d365ba16 | 297 | * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown. |
| ykuroda | 0:13a5d365ba16 | 298 | */ |
| ykuroda | 0:13a5d365ba16 | 299 | template<bool Align> inline void* conditional_aligned_malloc(size_t size) |
| ykuroda | 0:13a5d365ba16 | 300 | { |
| ykuroda | 0:13a5d365ba16 | 301 | return aligned_malloc(size); |
| ykuroda | 0:13a5d365ba16 | 302 | } |
| ykuroda | 0:13a5d365ba16 | 303 | |
| ykuroda | 0:13a5d365ba16 | 304 | template<> inline void* conditional_aligned_malloc<false>(size_t size) |
| ykuroda | 0:13a5d365ba16 | 305 | { |
| ykuroda | 0:13a5d365ba16 | 306 | check_that_malloc_is_allowed(); |
| ykuroda | 0:13a5d365ba16 | 307 | |
| ykuroda | 0:13a5d365ba16 | 308 | void *result = std::malloc(size); |
| ykuroda | 0:13a5d365ba16 | 309 | if(!result && size) |
| ykuroda | 0:13a5d365ba16 | 310 | throw_std_bad_alloc(); |
| ykuroda | 0:13a5d365ba16 | 311 | return result; |
| ykuroda | 0:13a5d365ba16 | 312 | } |
| ykuroda | 0:13a5d365ba16 | 313 | |
| ykuroda | 0:13a5d365ba16 | 314 | /** \internal Frees memory allocated with conditional_aligned_malloc */ |
| ykuroda | 0:13a5d365ba16 | 315 | template<bool Align> inline void conditional_aligned_free(void *ptr) |
| ykuroda | 0:13a5d365ba16 | 316 | { |
| ykuroda | 0:13a5d365ba16 | 317 | aligned_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 318 | } |
| ykuroda | 0:13a5d365ba16 | 319 | |
| ykuroda | 0:13a5d365ba16 | 320 | template<> inline void conditional_aligned_free<false>(void *ptr) |
| ykuroda | 0:13a5d365ba16 | 321 | { |
| ykuroda | 0:13a5d365ba16 | 322 | std::free(ptr); |
| ykuroda | 0:13a5d365ba16 | 323 | } |
| ykuroda | 0:13a5d365ba16 | 324 | |
| ykuroda | 0:13a5d365ba16 | 325 | template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size) |
| ykuroda | 0:13a5d365ba16 | 326 | { |
| ykuroda | 0:13a5d365ba16 | 327 | return aligned_realloc(ptr, new_size, old_size); |
| ykuroda | 0:13a5d365ba16 | 328 | } |
| ykuroda | 0:13a5d365ba16 | 329 | |
| ykuroda | 0:13a5d365ba16 | 330 | template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t) |
| ykuroda | 0:13a5d365ba16 | 331 | { |
| ykuroda | 0:13a5d365ba16 | 332 | return std::realloc(ptr, new_size); |
| ykuroda | 0:13a5d365ba16 | 333 | } |
| ykuroda | 0:13a5d365ba16 | 334 | |
| ykuroda | 0:13a5d365ba16 | 335 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 336 | *** Construction/destruction of array elements *** |
| ykuroda | 0:13a5d365ba16 | 337 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 338 | |
| ykuroda | 0:13a5d365ba16 | 339 | /** \internal Constructs the elements of an array. |
| ykuroda | 0:13a5d365ba16 | 340 | * The \a size parameter specifies on how many objects to call the constructor of T. |
| ykuroda | 0:13a5d365ba16 | 341 | */ |
| ykuroda | 0:13a5d365ba16 | 342 | template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size) |
| ykuroda | 0:13a5d365ba16 | 343 | { |
| ykuroda | 0:13a5d365ba16 | 344 | for (size_t i=0; i < size; ++i) ::new (ptr + i) T; |
| ykuroda | 0:13a5d365ba16 | 345 | return ptr; |
| ykuroda | 0:13a5d365ba16 | 346 | } |
| ykuroda | 0:13a5d365ba16 | 347 | |
| ykuroda | 0:13a5d365ba16 | 348 | /** \internal Destructs the elements of an array. |
| ykuroda | 0:13a5d365ba16 | 349 | * The \a size parameter specifies on how many objects to call the destructor of T. |
| ykuroda | 0:13a5d365ba16 | 350 | */ |
| ykuroda | 0:13a5d365ba16 | 351 | template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size) |
| ykuroda | 0:13a5d365ba16 | 352 | { |
| ykuroda | 0:13a5d365ba16 | 353 | // always destruct an array starting from the end. |
| ykuroda | 0:13a5d365ba16 | 354 | if(ptr) |
| ykuroda | 0:13a5d365ba16 | 355 | while(size) ptr[--size].~T(); |
| ykuroda | 0:13a5d365ba16 | 356 | } |
| ykuroda | 0:13a5d365ba16 | 357 | |
| ykuroda | 0:13a5d365ba16 | 358 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 359 | *** Implementation of aligned new/delete-like functions *** |
| ykuroda | 0:13a5d365ba16 | 360 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 361 | |
| ykuroda | 0:13a5d365ba16 | 362 | template<typename T> |
| ykuroda | 0:13a5d365ba16 | 363 | EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size) |
| ykuroda | 0:13a5d365ba16 | 364 | { |
| ykuroda | 0:13a5d365ba16 | 365 | if(size > size_t(-1) / sizeof(T)) |
| ykuroda | 0:13a5d365ba16 | 366 | throw_std_bad_alloc(); |
| ykuroda | 0:13a5d365ba16 | 367 | } |
| ykuroda | 0:13a5d365ba16 | 368 | |
| ykuroda | 0:13a5d365ba16 | 369 | /** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment. |
| ykuroda | 0:13a5d365ba16 | 370 | * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown. |
| ykuroda | 0:13a5d365ba16 | 371 | * The default constructor of T is called. |
| ykuroda | 0:13a5d365ba16 | 372 | */ |
| ykuroda | 0:13a5d365ba16 | 373 | template<typename T> inline T* aligned_new(size_t size) |
| ykuroda | 0:13a5d365ba16 | 374 | { |
| ykuroda | 0:13a5d365ba16 | 375 | check_size_for_overflow<T>(size); |
| ykuroda | 0:13a5d365ba16 | 376 | T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size)); |
| ykuroda | 0:13a5d365ba16 | 377 | return construct_elements_of_array(result, size); |
| ykuroda | 0:13a5d365ba16 | 378 | } |
| ykuroda | 0:13a5d365ba16 | 379 | |
| ykuroda | 0:13a5d365ba16 | 380 | template<typename T, bool Align> inline T* conditional_aligned_new(size_t size) |
| ykuroda | 0:13a5d365ba16 | 381 | { |
| ykuroda | 0:13a5d365ba16 | 382 | check_size_for_overflow<T>(size); |
| ykuroda | 0:13a5d365ba16 | 383 | T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size)); |
| ykuroda | 0:13a5d365ba16 | 384 | return construct_elements_of_array(result, size); |
| ykuroda | 0:13a5d365ba16 | 385 | } |
| ykuroda | 0:13a5d365ba16 | 386 | |
| ykuroda | 0:13a5d365ba16 | 387 | /** \internal Deletes objects constructed with aligned_new |
| ykuroda | 0:13a5d365ba16 | 388 | * The \a size parameter specifies on how many objects to call the destructor of T. |
| ykuroda | 0:13a5d365ba16 | 389 | */ |
| ykuroda | 0:13a5d365ba16 | 390 | template<typename T> inline void aligned_delete(T *ptr, size_t size) |
| ykuroda | 0:13a5d365ba16 | 391 | { |
| ykuroda | 0:13a5d365ba16 | 392 | destruct_elements_of_array<T>(ptr, size); |
| ykuroda | 0:13a5d365ba16 | 393 | aligned_free(ptr); |
| ykuroda | 0:13a5d365ba16 | 394 | } |
| ykuroda | 0:13a5d365ba16 | 395 | |
| ykuroda | 0:13a5d365ba16 | 396 | /** \internal Deletes objects constructed with conditional_aligned_new |
| ykuroda | 0:13a5d365ba16 | 397 | * The \a size parameter specifies on how many objects to call the destructor of T. |
| ykuroda | 0:13a5d365ba16 | 398 | */ |
| ykuroda | 0:13a5d365ba16 | 399 | template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size) |
| ykuroda | 0:13a5d365ba16 | 400 | { |
| ykuroda | 0:13a5d365ba16 | 401 | destruct_elements_of_array<T>(ptr, size); |
| ykuroda | 0:13a5d365ba16 | 402 | conditional_aligned_free<Align>(ptr); |
| ykuroda | 0:13a5d365ba16 | 403 | } |
| ykuroda | 0:13a5d365ba16 | 404 | |
| ykuroda | 0:13a5d365ba16 | 405 | template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size) |
| ykuroda | 0:13a5d365ba16 | 406 | { |
| ykuroda | 0:13a5d365ba16 | 407 | check_size_for_overflow<T>(new_size); |
| ykuroda | 0:13a5d365ba16 | 408 | check_size_for_overflow<T>(old_size); |
| ykuroda | 0:13a5d365ba16 | 409 | if(new_size < old_size) |
| ykuroda | 0:13a5d365ba16 | 410 | destruct_elements_of_array(pts+new_size, old_size-new_size); |
| ykuroda | 0:13a5d365ba16 | 411 | T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size)); |
| ykuroda | 0:13a5d365ba16 | 412 | if(new_size > old_size) |
| ykuroda | 0:13a5d365ba16 | 413 | construct_elements_of_array(result+old_size, new_size-old_size); |
| ykuroda | 0:13a5d365ba16 | 414 | return result; |
| ykuroda | 0:13a5d365ba16 | 415 | } |
| ykuroda | 0:13a5d365ba16 | 416 | |
| ykuroda | 0:13a5d365ba16 | 417 | |
| ykuroda | 0:13a5d365ba16 | 418 | template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size) |
| ykuroda | 0:13a5d365ba16 | 419 | { |
| ykuroda | 0:13a5d365ba16 | 420 | if(size==0) |
| ykuroda | 0:13a5d365ba16 | 421 | return 0; // short-cut. Also fixes Bug 884 |
| ykuroda | 0:13a5d365ba16 | 422 | check_size_for_overflow<T>(size); |
| ykuroda | 0:13a5d365ba16 | 423 | T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size)); |
| ykuroda | 0:13a5d365ba16 | 424 | if(NumTraits<T>::RequireInitialization) |
| ykuroda | 0:13a5d365ba16 | 425 | construct_elements_of_array(result, size); |
| ykuroda | 0:13a5d365ba16 | 426 | return result; |
| ykuroda | 0:13a5d365ba16 | 427 | } |
| ykuroda | 0:13a5d365ba16 | 428 | |
| ykuroda | 0:13a5d365ba16 | 429 | template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size) |
| ykuroda | 0:13a5d365ba16 | 430 | { |
| ykuroda | 0:13a5d365ba16 | 431 | check_size_for_overflow<T>(new_size); |
| ykuroda | 0:13a5d365ba16 | 432 | check_size_for_overflow<T>(old_size); |
| ykuroda | 0:13a5d365ba16 | 433 | if(NumTraits<T>::RequireInitialization && (new_size < old_size)) |
| ykuroda | 0:13a5d365ba16 | 434 | destruct_elements_of_array(pts+new_size, old_size-new_size); |
| ykuroda | 0:13a5d365ba16 | 435 | T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size)); |
| ykuroda | 0:13a5d365ba16 | 436 | if(NumTraits<T>::RequireInitialization && (new_size > old_size)) |
| ykuroda | 0:13a5d365ba16 | 437 | construct_elements_of_array(result+old_size, new_size-old_size); |
| ykuroda | 0:13a5d365ba16 | 438 | return result; |
| ykuroda | 0:13a5d365ba16 | 439 | } |
| ykuroda | 0:13a5d365ba16 | 440 | |
| ykuroda | 0:13a5d365ba16 | 441 | template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size) |
| ykuroda | 0:13a5d365ba16 | 442 | { |
| ykuroda | 0:13a5d365ba16 | 443 | if(NumTraits<T>::RequireInitialization) |
| ykuroda | 0:13a5d365ba16 | 444 | destruct_elements_of_array<T>(ptr, size); |
| ykuroda | 0:13a5d365ba16 | 445 | conditional_aligned_free<Align>(ptr); |
| ykuroda | 0:13a5d365ba16 | 446 | } |
| ykuroda | 0:13a5d365ba16 | 447 | |
| ykuroda | 0:13a5d365ba16 | 448 | /****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 449 | |
| ykuroda | 0:13a5d365ba16 | 450 | /** \internal Returns the index of the first element of the array that is well aligned for vectorization. |
| ykuroda | 0:13a5d365ba16 | 451 | * |
| ykuroda | 0:13a5d365ba16 | 452 | * \param array the address of the start of the array |
| ykuroda | 0:13a5d365ba16 | 453 | * \param size the size of the array |
| ykuroda | 0:13a5d365ba16 | 454 | * |
| ykuroda | 0:13a5d365ba16 | 455 | * \note If no element of the array is well aligned, the size of the array is returned. Typically, |
| ykuroda | 0:13a5d365ba16 | 456 | * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the |
| ykuroda | 0:13a5d365ba16 | 457 | * packet size for the given scalar type is 1, then everything is considered well-aligned. |
| ykuroda | 0:13a5d365ba16 | 458 | * |
| ykuroda | 0:13a5d365ba16 | 459 | * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a |
| ykuroda | 0:13a5d365ba16 | 460 | * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the |
| ykuroda | 0:13a5d365ba16 | 461 | * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for |
| ykuroda | 0:13a5d365ba16 | 462 | * example with Scalar=double on certain 32-bit platforms, see bug #79. |
| ykuroda | 0:13a5d365ba16 | 463 | * |
| ykuroda | 0:13a5d365ba16 | 464 | * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h. |
| ykuroda | 0:13a5d365ba16 | 465 | */ |
| ykuroda | 0:13a5d365ba16 | 466 | template<typename Scalar, typename Index> |
| ykuroda | 0:13a5d365ba16 | 467 | static inline Index first_aligned(const Scalar* array, Index size) |
| ykuroda | 0:13a5d365ba16 | 468 | { |
| ykuroda | 0:13a5d365ba16 | 469 | static const Index PacketSize = packet_traits<Scalar>::size; |
| ykuroda | 0:13a5d365ba16 | 470 | static const Index PacketAlignedMask = PacketSize-1; |
| ykuroda | 0:13a5d365ba16 | 471 | |
| ykuroda | 0:13a5d365ba16 | 472 | if(PacketSize==1) |
| ykuroda | 0:13a5d365ba16 | 473 | { |
| ykuroda | 0:13a5d365ba16 | 474 | // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements |
| ykuroda | 0:13a5d365ba16 | 475 | // of the array have the same alignment. |
| ykuroda | 0:13a5d365ba16 | 476 | return 0; |
| ykuroda | 0:13a5d365ba16 | 477 | } |
| ykuroda | 0:13a5d365ba16 | 478 | else if(size_t(array) & (sizeof(Scalar)-1)) |
| ykuroda | 0:13a5d365ba16 | 479 | { |
| ykuroda | 0:13a5d365ba16 | 480 | // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar. |
| ykuroda | 0:13a5d365ba16 | 481 | // Consequently, no element of the array is well aligned. |
| ykuroda | 0:13a5d365ba16 | 482 | return size; |
| ykuroda | 0:13a5d365ba16 | 483 | } |
| ykuroda | 0:13a5d365ba16 | 484 | else |
| ykuroda | 0:13a5d365ba16 | 485 | { |
| ykuroda | 0:13a5d365ba16 | 486 | return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask)) |
| ykuroda | 0:13a5d365ba16 | 487 | & PacketAlignedMask, size); |
| ykuroda | 0:13a5d365ba16 | 488 | } |
| ykuroda | 0:13a5d365ba16 | 489 | } |
| ykuroda | 0:13a5d365ba16 | 490 | |
| ykuroda | 0:13a5d365ba16 | 491 | /** \internal Returns the smallest integer multiple of \a base and greater or equal to \a size |
| ykuroda | 0:13a5d365ba16 | 492 | */ |
| ykuroda | 0:13a5d365ba16 | 493 | template<typename Index> |
| ykuroda | 0:13a5d365ba16 | 494 | inline static Index first_multiple(Index size, Index base) |
| ykuroda | 0:13a5d365ba16 | 495 | { |
| ykuroda | 0:13a5d365ba16 | 496 | return ((size+base-1)/base)*base; |
| ykuroda | 0:13a5d365ba16 | 497 | } |
| ykuroda | 0:13a5d365ba16 | 498 | |
| ykuroda | 0:13a5d365ba16 | 499 | // std::copy is much slower than memcpy, so let's introduce a smart_copy which |
| ykuroda | 0:13a5d365ba16 | 500 | // uses memcpy on trivial types, i.e., on types that do not require an initialization ctor. |
| ykuroda | 0:13a5d365ba16 | 501 | template<typename T, bool UseMemcpy> struct smart_copy_helper; |
| ykuroda | 0:13a5d365ba16 | 502 | |
| ykuroda | 0:13a5d365ba16 | 503 | template<typename T> void smart_copy(const T* start, const T* end, T* target) |
| ykuroda | 0:13a5d365ba16 | 504 | { |
| ykuroda | 0:13a5d365ba16 | 505 | smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target); |
| ykuroda | 0:13a5d365ba16 | 506 | } |
| ykuroda | 0:13a5d365ba16 | 507 | |
| ykuroda | 0:13a5d365ba16 | 508 | template<typename T> struct smart_copy_helper<T,true> { |
| ykuroda | 0:13a5d365ba16 | 509 | static inline void run(const T* start, const T* end, T* target) |
| ykuroda | 0:13a5d365ba16 | 510 | { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); } |
| ykuroda | 0:13a5d365ba16 | 511 | }; |
| ykuroda | 0:13a5d365ba16 | 512 | |
| ykuroda | 0:13a5d365ba16 | 513 | template<typename T> struct smart_copy_helper<T,false> { |
| ykuroda | 0:13a5d365ba16 | 514 | static inline void run(const T* start, const T* end, T* target) |
| ykuroda | 0:13a5d365ba16 | 515 | { std::copy(start, end, target); } |
| ykuroda | 0:13a5d365ba16 | 516 | }; |
| ykuroda | 0:13a5d365ba16 | 517 | |
| ykuroda | 0:13a5d365ba16 | 518 | |
| ykuroda | 0:13a5d365ba16 | 519 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 520 | *** Implementation of runtime stack allocation (falling back to malloc) *** |
| ykuroda | 0:13a5d365ba16 | 521 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 522 | |
| ykuroda | 0:13a5d365ba16 | 523 | // you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA |
| ykuroda | 0:13a5d365ba16 | 524 | // to the appropriate stack allocation function |
| ykuroda | 0:13a5d365ba16 | 525 | #ifndef EIGEN_ALLOCA |
| ykuroda | 0:13a5d365ba16 | 526 | #if (defined __linux__) || (defined __APPLE__) || (defined alloca) |
| ykuroda | 0:13a5d365ba16 | 527 | #define EIGEN_ALLOCA alloca |
| ykuroda | 0:13a5d365ba16 | 528 | #elif defined(_MSC_VER) |
| ykuroda | 0:13a5d365ba16 | 529 | #define EIGEN_ALLOCA _alloca |
| ykuroda | 0:13a5d365ba16 | 530 | #endif |
| ykuroda | 0:13a5d365ba16 | 531 | #endif |
| ykuroda | 0:13a5d365ba16 | 532 | |
| ykuroda | 0:13a5d365ba16 | 533 | // This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data |
| ykuroda | 0:13a5d365ba16 | 534 | // at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions. |
| ykuroda | 0:13a5d365ba16 | 535 | template<typename T> class aligned_stack_memory_handler |
| ykuroda | 0:13a5d365ba16 | 536 | { |
| ykuroda | 0:13a5d365ba16 | 537 | public: |
| ykuroda | 0:13a5d365ba16 | 538 | /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size. |
| ykuroda | 0:13a5d365ba16 | 539 | * Note that \a ptr can be 0 regardless of the other parameters. |
| ykuroda | 0:13a5d365ba16 | 540 | * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization). |
| ykuroda | 0:13a5d365ba16 | 541 | * In this case, the buffer elements will also be destructed when this handler is destructed. |
| ykuroda | 0:13a5d365ba16 | 542 | * Finally, if \a dealloc is true, then the pointer \a ptr is freed. |
| ykuroda | 0:13a5d365ba16 | 543 | **/ |
| ykuroda | 0:13a5d365ba16 | 544 | aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc) |
| ykuroda | 0:13a5d365ba16 | 545 | : m_ptr(ptr), m_size(size), m_deallocate(dealloc) |
| ykuroda | 0:13a5d365ba16 | 546 | { |
| ykuroda | 0:13a5d365ba16 | 547 | if(NumTraits<T>::RequireInitialization && m_ptr) |
| ykuroda | 0:13a5d365ba16 | 548 | Eigen::internal::construct_elements_of_array(m_ptr, size); |
| ykuroda | 0:13a5d365ba16 | 549 | } |
| ykuroda | 0:13a5d365ba16 | 550 | ~aligned_stack_memory_handler() |
| ykuroda | 0:13a5d365ba16 | 551 | { |
| ykuroda | 0:13a5d365ba16 | 552 | if(NumTraits<T>::RequireInitialization && m_ptr) |
| ykuroda | 0:13a5d365ba16 | 553 | Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size); |
| ykuroda | 0:13a5d365ba16 | 554 | if(m_deallocate) |
| ykuroda | 0:13a5d365ba16 | 555 | Eigen::internal::aligned_free(m_ptr); |
| ykuroda | 0:13a5d365ba16 | 556 | } |
| ykuroda | 0:13a5d365ba16 | 557 | protected: |
| ykuroda | 0:13a5d365ba16 | 558 | T* m_ptr; |
| ykuroda | 0:13a5d365ba16 | 559 | size_t m_size; |
| ykuroda | 0:13a5d365ba16 | 560 | bool m_deallocate; |
| ykuroda | 0:13a5d365ba16 | 561 | }; |
| ykuroda | 0:13a5d365ba16 | 562 | |
| ykuroda | 0:13a5d365ba16 | 563 | } // end namespace internal |
| ykuroda | 0:13a5d365ba16 | 564 | |
| ykuroda | 0:13a5d365ba16 | 565 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 566 | * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack |
| ykuroda | 0:13a5d365ba16 | 567 | * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform |
| ykuroda | 0:13a5d365ba16 | 568 | * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap. |
| ykuroda | 0:13a5d365ba16 | 569 | * The allocated buffer is automatically deleted when exiting the scope of this declaration. |
| ykuroda | 0:13a5d365ba16 | 570 | * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs. |
| ykuroda | 0:13a5d365ba16 | 571 | * Here is an example: |
| ykuroda | 0:13a5d365ba16 | 572 | * \code |
| ykuroda | 0:13a5d365ba16 | 573 | * { |
| ykuroda | 0:13a5d365ba16 | 574 | * ei_declare_aligned_stack_constructed_variable(float,data,size,0); |
| ykuroda | 0:13a5d365ba16 | 575 | * // use data[0] to data[size-1] |
| ykuroda | 0:13a5d365ba16 | 576 | * } |
| ykuroda | 0:13a5d365ba16 | 577 | * \endcode |
| ykuroda | 0:13a5d365ba16 | 578 | * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token. |
| ykuroda | 0:13a5d365ba16 | 579 | */ |
| ykuroda | 0:13a5d365ba16 | 580 | #ifdef EIGEN_ALLOCA |
| ykuroda | 0:13a5d365ba16 | 581 | |
| ykuroda | 0:13a5d365ba16 | 582 | #if defined(__arm__) || defined(_WIN32) |
| ykuroda | 0:13a5d365ba16 | 583 | #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16) |
| ykuroda | 0:13a5d365ba16 | 584 | #else |
| ykuroda | 0:13a5d365ba16 | 585 | #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA |
| ykuroda | 0:13a5d365ba16 | 586 | #endif |
| ykuroda | 0:13a5d365ba16 | 587 | |
| ykuroda | 0:13a5d365ba16 | 588 | #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \ |
| ykuroda | 0:13a5d365ba16 | 589 | Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \ |
| ykuroda | 0:13a5d365ba16 | 590 | TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \ |
| ykuroda | 0:13a5d365ba16 | 591 | : reinterpret_cast<TYPE*>( \ |
| ykuroda | 0:13a5d365ba16 | 592 | (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \ |
| ykuroda | 0:13a5d365ba16 | 593 | : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \ |
| ykuroda | 0:13a5d365ba16 | 594 | Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT) |
| ykuroda | 0:13a5d365ba16 | 595 | |
| ykuroda | 0:13a5d365ba16 | 596 | #else |
| ykuroda | 0:13a5d365ba16 | 597 | |
| ykuroda | 0:13a5d365ba16 | 598 | #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \ |
| ykuroda | 0:13a5d365ba16 | 599 | Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \ |
| ykuroda | 0:13a5d365ba16 | 600 | TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \ |
| ykuroda | 0:13a5d365ba16 | 601 | Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true) |
| ykuroda | 0:13a5d365ba16 | 602 | |
| ykuroda | 0:13a5d365ba16 | 603 | #endif |
| ykuroda | 0:13a5d365ba16 | 604 | |
| ykuroda | 0:13a5d365ba16 | 605 | |
| ykuroda | 0:13a5d365ba16 | 606 | /***************************************************************************** |
| ykuroda | 0:13a5d365ba16 | 607 | *** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] *** |
| ykuroda | 0:13a5d365ba16 | 608 | *****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 609 | |
| ykuroda | 0:13a5d365ba16 | 610 | #if EIGEN_ALIGN |
| ykuroda | 0:13a5d365ba16 | 611 | #ifdef EIGEN_EXCEPTIONS |
| ykuroda | 0:13a5d365ba16 | 612 | #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ |
| ykuroda | 0:13a5d365ba16 | 613 | void* operator new(size_t size, const std::nothrow_t&) throw() { \ |
| ykuroda | 0:13a5d365ba16 | 614 | try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \ |
| ykuroda | 0:13a5d365ba16 | 615 | catch (...) { return 0; } \ |
| ykuroda | 0:13a5d365ba16 | 616 | } |
| ykuroda | 0:13a5d365ba16 | 617 | #else |
| ykuroda | 0:13a5d365ba16 | 618 | #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ |
| ykuroda | 0:13a5d365ba16 | 619 | void* operator new(size_t size, const std::nothrow_t&) throw() { \ |
| ykuroda | 0:13a5d365ba16 | 620 | return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \ |
| ykuroda | 0:13a5d365ba16 | 621 | } |
| ykuroda | 0:13a5d365ba16 | 622 | #endif |
| ykuroda | 0:13a5d365ba16 | 623 | |
| ykuroda | 0:13a5d365ba16 | 624 | #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ |
| ykuroda | 0:13a5d365ba16 | 625 | void *operator new(size_t size) { \ |
| ykuroda | 0:13a5d365ba16 | 626 | return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \ |
| ykuroda | 0:13a5d365ba16 | 627 | } \ |
| ykuroda | 0:13a5d365ba16 | 628 | void *operator new[](size_t size) { \ |
| ykuroda | 0:13a5d365ba16 | 629 | return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \ |
| ykuroda | 0:13a5d365ba16 | 630 | } \ |
| ykuroda | 0:13a5d365ba16 | 631 | void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 632 | void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 633 | void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 634 | void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 635 | /* in-place new and delete. since (at least afaik) there is no actual */ \ |
| ykuroda | 0:13a5d365ba16 | 636 | /* memory allocated we can safely let the default implementation handle */ \ |
| ykuroda | 0:13a5d365ba16 | 637 | /* this particular case. */ \ |
| ykuroda | 0:13a5d365ba16 | 638 | static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 639 | static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 640 | void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 641 | void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \ |
| ykuroda | 0:13a5d365ba16 | 642 | /* nothrow-new (returns zero instead of std::bad_alloc) */ \ |
| ykuroda | 0:13a5d365ba16 | 643 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ |
| ykuroda | 0:13a5d365ba16 | 644 | void operator delete(void *ptr, const std::nothrow_t&) throw() { \ |
| ykuroda | 0:13a5d365ba16 | 645 | Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \ |
| ykuroda | 0:13a5d365ba16 | 646 | } \ |
| ykuroda | 0:13a5d365ba16 | 647 | typedef void eigen_aligned_operator_new_marker_type; |
| ykuroda | 0:13a5d365ba16 | 648 | #else |
| ykuroda | 0:13a5d365ba16 | 649 | #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) |
| ykuroda | 0:13a5d365ba16 | 650 | #endif |
| ykuroda | 0:13a5d365ba16 | 651 | |
| ykuroda | 0:13a5d365ba16 | 652 | #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true) |
| ykuroda | 0:13a5d365ba16 | 653 | #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \ |
| ykuroda | 0:13a5d365ba16 | 654 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0))) |
| ykuroda | 0:13a5d365ba16 | 655 | |
| ykuroda | 0:13a5d365ba16 | 656 | /****************************************************************************/ |
| ykuroda | 0:13a5d365ba16 | 657 | |
| ykuroda | 0:13a5d365ba16 | 658 | /** \class aligned_allocator |
| ykuroda | 0:13a5d365ba16 | 659 | * \ingroup Core_Module |
| ykuroda | 0:13a5d365ba16 | 660 | * |
| ykuroda | 0:13a5d365ba16 | 661 | * \brief STL compatible allocator to use with 16-byte-aligned types |
| ykuroda | 0:13a5d365ba16 | 662 | * |
| ykuroda | 0:13a5d365ba16 | 663 | * Example: |
| ykuroda | 0:13a5d365ba16 | 664 | * \code |
| ykuroda | 0:13a5d365ba16 | 665 | * // Matrix4f requires 16 bytes alignment: |
| ykuroda | 0:13a5d365ba16 | 666 | * std::map< int, Matrix4f, std::less<int>, |
| ykuroda | 0:13a5d365ba16 | 667 | * aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4; |
| ykuroda | 0:13a5d365ba16 | 668 | * // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: |
| ykuroda | 0:13a5d365ba16 | 669 | * std::map< int, Vector3f > my_map_vec3; |
| ykuroda | 0:13a5d365ba16 | 670 | * \endcode |
| ykuroda | 0:13a5d365ba16 | 671 | * |
| ykuroda | 0:13a5d365ba16 | 672 | * \sa \ref TopicStlContainers. |
| ykuroda | 0:13a5d365ba16 | 673 | */ |
| ykuroda | 0:13a5d365ba16 | 674 | template<class T> |
| ykuroda | 0:13a5d365ba16 | 675 | class aligned_allocator |
| ykuroda | 0:13a5d365ba16 | 676 | { |
| ykuroda | 0:13a5d365ba16 | 677 | public: |
| ykuroda | 0:13a5d365ba16 | 678 | typedef size_t size_type; |
| ykuroda | 0:13a5d365ba16 | 679 | typedef std::ptrdiff_t difference_type; |
| ykuroda | 0:13a5d365ba16 | 680 | typedef T* pointer; |
| ykuroda | 0:13a5d365ba16 | 681 | typedef const T* const_pointer; |
| ykuroda | 0:13a5d365ba16 | 682 | typedef T& reference; |
| ykuroda | 0:13a5d365ba16 | 683 | typedef const T& const_reference; |
| ykuroda | 0:13a5d365ba16 | 684 | typedef T value_type; |
| ykuroda | 0:13a5d365ba16 | 685 | |
| ykuroda | 0:13a5d365ba16 | 686 | template<class U> |
| ykuroda | 0:13a5d365ba16 | 687 | struct rebind |
| ykuroda | 0:13a5d365ba16 | 688 | { |
| ykuroda | 0:13a5d365ba16 | 689 | typedef aligned_allocator<U> other; |
| ykuroda | 0:13a5d365ba16 | 690 | }; |
| ykuroda | 0:13a5d365ba16 | 691 | |
| ykuroda | 0:13a5d365ba16 | 692 | pointer address( reference value ) const |
| ykuroda | 0:13a5d365ba16 | 693 | { |
| ykuroda | 0:13a5d365ba16 | 694 | return &value; |
| ykuroda | 0:13a5d365ba16 | 695 | } |
| ykuroda | 0:13a5d365ba16 | 696 | |
| ykuroda | 0:13a5d365ba16 | 697 | const_pointer address( const_reference value ) const |
| ykuroda | 0:13a5d365ba16 | 698 | { |
| ykuroda | 0:13a5d365ba16 | 699 | return &value; |
| ykuroda | 0:13a5d365ba16 | 700 | } |
| ykuroda | 0:13a5d365ba16 | 701 | |
| ykuroda | 0:13a5d365ba16 | 702 | aligned_allocator() |
| ykuroda | 0:13a5d365ba16 | 703 | { |
| ykuroda | 0:13a5d365ba16 | 704 | } |
| ykuroda | 0:13a5d365ba16 | 705 | |
| ykuroda | 0:13a5d365ba16 | 706 | aligned_allocator( const aligned_allocator& ) |
| ykuroda | 0:13a5d365ba16 | 707 | { |
| ykuroda | 0:13a5d365ba16 | 708 | } |
| ykuroda | 0:13a5d365ba16 | 709 | |
| ykuroda | 0:13a5d365ba16 | 710 | template<class U> |
| ykuroda | 0:13a5d365ba16 | 711 | aligned_allocator( const aligned_allocator<U>& ) |
| ykuroda | 0:13a5d365ba16 | 712 | { |
| ykuroda | 0:13a5d365ba16 | 713 | } |
| ykuroda | 0:13a5d365ba16 | 714 | |
| ykuroda | 0:13a5d365ba16 | 715 | ~aligned_allocator() |
| ykuroda | 0:13a5d365ba16 | 716 | { |
| ykuroda | 0:13a5d365ba16 | 717 | } |
| ykuroda | 0:13a5d365ba16 | 718 | |
| ykuroda | 0:13a5d365ba16 | 719 | size_type max_size() const |
| ykuroda | 0:13a5d365ba16 | 720 | { |
| ykuroda | 0:13a5d365ba16 | 721 | return (std::numeric_limits<size_type>::max)(); |
| ykuroda | 0:13a5d365ba16 | 722 | } |
| ykuroda | 0:13a5d365ba16 | 723 | |
| ykuroda | 0:13a5d365ba16 | 724 | pointer allocate( size_type num, const void* hint = 0 ) |
| ykuroda | 0:13a5d365ba16 | 725 | { |
| ykuroda | 0:13a5d365ba16 | 726 | EIGEN_UNUSED_VARIABLE(hint); |
| ykuroda | 0:13a5d365ba16 | 727 | internal::check_size_for_overflow<T>(num); |
| ykuroda | 0:13a5d365ba16 | 728 | return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) ); |
| ykuroda | 0:13a5d365ba16 | 729 | } |
| ykuroda | 0:13a5d365ba16 | 730 | |
| ykuroda | 0:13a5d365ba16 | 731 | void construct( pointer p, const T& value ) |
| ykuroda | 0:13a5d365ba16 | 732 | { |
| ykuroda | 0:13a5d365ba16 | 733 | ::new( p ) T( value ); |
| ykuroda | 0:13a5d365ba16 | 734 | } |
| ykuroda | 0:13a5d365ba16 | 735 | |
| ykuroda | 0:13a5d365ba16 | 736 | void destroy( pointer p ) |
| ykuroda | 0:13a5d365ba16 | 737 | { |
| ykuroda | 0:13a5d365ba16 | 738 | p->~T(); |
| ykuroda | 0:13a5d365ba16 | 739 | } |
| ykuroda | 0:13a5d365ba16 | 740 | |
| ykuroda | 0:13a5d365ba16 | 741 | void deallocate( pointer p, size_type /*num*/ ) |
| ykuroda | 0:13a5d365ba16 | 742 | { |
| ykuroda | 0:13a5d365ba16 | 743 | internal::aligned_free( p ); |
| ykuroda | 0:13a5d365ba16 | 744 | } |
| ykuroda | 0:13a5d365ba16 | 745 | |
| ykuroda | 0:13a5d365ba16 | 746 | bool operator!=(const aligned_allocator<T>& ) const |
| ykuroda | 0:13a5d365ba16 | 747 | { return false; } |
| ykuroda | 0:13a5d365ba16 | 748 | |
| ykuroda | 0:13a5d365ba16 | 749 | bool operator==(const aligned_allocator<T>& ) const |
| ykuroda | 0:13a5d365ba16 | 750 | { return true; } |
| ykuroda | 0:13a5d365ba16 | 751 | }; |
| ykuroda | 0:13a5d365ba16 | 752 | |
| ykuroda | 0:13a5d365ba16 | 753 | //---------- Cache sizes ---------- |
| ykuroda | 0:13a5d365ba16 | 754 | |
| ykuroda | 0:13a5d365ba16 | 755 | #if !defined(EIGEN_NO_CPUID) |
| ykuroda | 0:13a5d365ba16 | 756 | # if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) ) |
| ykuroda | 0:13a5d365ba16 | 757 | # if defined(__PIC__) && defined(__i386__) |
| ykuroda | 0:13a5d365ba16 | 758 | // Case for x86 with PIC |
| ykuroda | 0:13a5d365ba16 | 759 | # define EIGEN_CPUID(abcd,func,id) \ |
| ykuroda | 0:13a5d365ba16 | 760 | __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id)); |
| ykuroda | 0:13a5d365ba16 | 761 | # elif defined(__PIC__) && defined(__x86_64__) |
| ykuroda | 0:13a5d365ba16 | 762 | // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model. |
| ykuroda | 0:13a5d365ba16 | 763 | // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway. |
| ykuroda | 0:13a5d365ba16 | 764 | # define EIGEN_CPUID(abcd,func,id) \ |
| ykuroda | 0:13a5d365ba16 | 765 | __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id)); |
| ykuroda | 0:13a5d365ba16 | 766 | # else |
| ykuroda | 0:13a5d365ba16 | 767 | // Case for x86_64 or x86 w/o PIC |
| ykuroda | 0:13a5d365ba16 | 768 | # define EIGEN_CPUID(abcd,func,id) \ |
| ykuroda | 0:13a5d365ba16 | 769 | __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) ); |
| ykuroda | 0:13a5d365ba16 | 770 | # endif |
| ykuroda | 0:13a5d365ba16 | 771 | # elif defined(_MSC_VER) |
| ykuroda | 0:13a5d365ba16 | 772 | # if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) ) |
| ykuroda | 0:13a5d365ba16 | 773 | # define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id) |
| ykuroda | 0:13a5d365ba16 | 774 | # endif |
| ykuroda | 0:13a5d365ba16 | 775 | # endif |
| ykuroda | 0:13a5d365ba16 | 776 | #endif |
| ykuroda | 0:13a5d365ba16 | 777 | |
| ykuroda | 0:13a5d365ba16 | 778 | namespace internal { |
| ykuroda | 0:13a5d365ba16 | 779 | |
| ykuroda | 0:13a5d365ba16 | 780 | #ifdef EIGEN_CPUID |
| ykuroda | 0:13a5d365ba16 | 781 | |
| ykuroda | 0:13a5d365ba16 | 782 | inline bool cpuid_is_vendor(int abcd[4], const int vendor[3]) |
| ykuroda | 0:13a5d365ba16 | 783 | { |
| ykuroda | 0:13a5d365ba16 | 784 | return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2]; |
| ykuroda | 0:13a5d365ba16 | 785 | } |
| ykuroda | 0:13a5d365ba16 | 786 | |
| ykuroda | 0:13a5d365ba16 | 787 | inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) |
| ykuroda | 0:13a5d365ba16 | 788 | { |
| ykuroda | 0:13a5d365ba16 | 789 | int abcd[4]; |
| ykuroda | 0:13a5d365ba16 | 790 | l1 = l2 = l3 = 0; |
| ykuroda | 0:13a5d365ba16 | 791 | int cache_id = 0; |
| ykuroda | 0:13a5d365ba16 | 792 | int cache_type = 0; |
| ykuroda | 0:13a5d365ba16 | 793 | do { |
| ykuroda | 0:13a5d365ba16 | 794 | abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; |
| ykuroda | 0:13a5d365ba16 | 795 | EIGEN_CPUID(abcd,0x4,cache_id); |
| ykuroda | 0:13a5d365ba16 | 796 | cache_type = (abcd[0] & 0x0F) >> 0; // A[3:0] = cache type (0: no more caches, 1: data, 2: instruction, 3: unified) |
| ykuroda | 0:13a5d365ba16 | 797 | if(cache_type==1||cache_type==3) // data or unified cache |
| ykuroda | 0:13a5d365ba16 | 798 | { |
| ykuroda | 0:13a5d365ba16 | 799 | int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5] |
| ykuroda | 0:13a5d365ba16 | 800 | int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22] |
| ykuroda | 0:13a5d365ba16 | 801 | int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12] |
| ykuroda | 0:13a5d365ba16 | 802 | int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0] |
| ykuroda | 0:13a5d365ba16 | 803 | int sets = (abcd[2]); // C[31:0] |
| ykuroda | 0:13a5d365ba16 | 804 | |
| ykuroda | 0:13a5d365ba16 | 805 | int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1); |
| ykuroda | 0:13a5d365ba16 | 806 | |
| ykuroda | 0:13a5d365ba16 | 807 | switch(cache_level) |
| ykuroda | 0:13a5d365ba16 | 808 | { |
| ykuroda | 0:13a5d365ba16 | 809 | case 1: l1 = cache_size; break; |
| ykuroda | 0:13a5d365ba16 | 810 | case 2: l2 = cache_size; break; |
| ykuroda | 0:13a5d365ba16 | 811 | case 3: l3 = cache_size; break; |
| ykuroda | 0:13a5d365ba16 | 812 | default: break; |
| ykuroda | 0:13a5d365ba16 | 813 | } |
| ykuroda | 0:13a5d365ba16 | 814 | } |
| ykuroda | 0:13a5d365ba16 | 815 | cache_id++; |
| ykuroda | 0:13a5d365ba16 | 816 | } while(cache_type>0 && cache_id<16); |
| ykuroda | 0:13a5d365ba16 | 817 | } |
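
The deterministic leaf-0x4 fields above are all stored minus one, hence the four `+1` terms in the size formula. A worked example with plausible values for a 32 KB, 8-way L1 data cache (the numbers are illustrative, not read from real hardware):

```cpp
#include <cassert>

int main()
{
  // EBX[31:22] = ways-1, EBX[21:12] = partitions-1,
  // EBX[11:0]  = line_size-1, ECX = sets-1
  int ways = 7, partitions = 0, line_size = 63, sets = 63;
  int cache_size = (ways + 1) * (partitions + 1) * (line_size + 1) * (sets + 1);
  assert(cache_size == 32 * 1024); // 8 * 1 * 64 * 64 = 32768 bytes
  return 0;
}
```
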
| ykuroda | 0:13a5d365ba16 | 818 | |
| ykuroda | 0:13a5d365ba16 | 819 | inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) |
| ykuroda | 0:13a5d365ba16 | 820 | { |
| ykuroda | 0:13a5d365ba16 | 821 | int abcd[4]; |
| ykuroda | 0:13a5d365ba16 | 822 | abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; |
| ykuroda | 0:13a5d365ba16 | 823 | l1 = l2 = l3 = 0; |
| ykuroda | 0:13a5d365ba16 | 824 | EIGEN_CPUID(abcd,0x00000002,0); |
| ykuroda | 0:13a5d365ba16 | 825 | unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2; |
| ykuroda | 0:13a5d365ba16 | 826 | bool check_for_p2_core2 = false; |
| ykuroda | 0:13a5d365ba16 | 827 | for(int i=0; i<14; ++i) |
| ykuroda | 0:13a5d365ba16 | 828 | { |
| ykuroda | 0:13a5d365ba16 | 829 | switch(bytes[i]) |
| ykuroda | 0:13a5d365ba16 | 830 | { |
| ykuroda | 0:13a5d365ba16 | 831 | case 0x0A: l1 = 8; break; // 0Ah data L1 cache, 8 KB, 2 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 832 | case 0x0C: l1 = 16; break; // 0Ch data L1 cache, 16 KB, 4 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 833 | case 0x0E: l1 = 24; break; // 0Eh data L1 cache, 24 KB, 6 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 834 | case 0x10: l1 = 16; break; // 10h data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 835 | case 0x15: l1 = 16; break; // 15h code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 836 | case 0x2C: l1 = 32; break; // 2Ch data L1 cache, 32 KB, 8 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 837 | case 0x30: l1 = 32; break; // 30h code L1 cache, 32 KB, 8 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 838 | case 0x60: l1 = 16; break; // 60h data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 839 | case 0x66: l1 = 8; break; // 66h data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 840 | case 0x67: l1 = 16; break; // 67h data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 841 | case 0x68: l1 = 32; break; // 68h data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 842 | case 0x1A: l2 = 96; break; // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 843 | case 0x22: l3 = 512; break; // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 844 | case 0x23: l3 = 1024; break; // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 845 | case 0x25: l3 = 2048; break; // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 846 | case 0x29: l3 = 4096; break; // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 847 | case 0x39: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 848 | case 0x3A: l2 = 192; break; // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 849 | case 0x3B: l2 = 128; break; // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 850 | case 0x3C: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 851 | case 0x3D: l2 = 384; break; // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 852 | case 0x3E: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored |
| ykuroda | 0:13a5d365ba16 | 853 | case 0x40: l2 = 0; break; // no integrated L2 cache (P6 core) or L3 cache (P4 core) |
| ykuroda | 0:13a5d365ba16 | 854 | case 0x41: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 855 | case 0x42: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 856 | case 0x43: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 857 | case 0x44: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 858 | case 0x45: l2 = 2048; break; // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 859 | case 0x46: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 860 | case 0x47: l3 = 8192; break; // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 861 | case 0x48: l2 = 3072; break; // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 862 | case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2 |
| ykuroda | 0:13a5d365ba16 | 863 | case 0x4A: l3 = 6144; break; // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 864 | case 0x4B: l3 = 8192; break; // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 865 | case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 866 | case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 867 | case 0x4E: l2 = 6144; break; // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 868 | case 0x78: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 869 | case 0x79: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 870 | case 0x7A: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 871 | case 0x7B: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 872 | case 0x7C: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored |
| ykuroda | 0:13a5d365ba16 | 873 | case 0x7D: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 874 | case 0x7E: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64) |
| ykuroda | 0:13a5d365ba16 | 875 | case 0x7F: l2 = 512; break; // code and data L2 cache, 512 KB, 2 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 876 | case 0x80: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 877 | case 0x81: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 878 | case 0x82: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 879 | case 0x83: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 880 | case 0x84: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 881 | case 0x85: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines |
| ykuroda | 0:13a5d365ba16 | 882 | case 0x86: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 883 | case 0x87: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines |
| ykuroda | 0:13a5d365ba16 | 884 | case 0x88: l3 = 2048; break; // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 885 | case 0x89: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 886 | case 0x8A: l3 = 8192; break; // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 887 | case 0x8D: l3 = 3072; break; // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64) |
| ykuroda | 0:13a5d365ba16 | 888 | |
| ykuroda | 0:13a5d365ba16 | 889 | default: break; |
| ykuroda | 0:13a5d365ba16 | 890 | } |
| ykuroda | 0:13a5d365ba16 | 891 | } |
| ykuroda | 0:13a5d365ba16 | 892 | if(check_for_p2_core2 && l2 == l3) |
| ykuroda | 0:13a5d365ba16 | 893 | l3 = 0; |
| ykuroda | 0:13a5d365ba16 | 894 | l1 *= 1024; |
| ykuroda | 0:13a5d365ba16 | 895 | l2 *= 1024; |
| ykuroda | 0:13a5d365ba16 | 896 | l3 *= 1024; |
| ykuroda | 0:13a5d365ba16 | 897 | } |
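
The pointer arithmetic at the top of `queryCacheSizes_intel_codes()` deserves a note: leaf 0x2 packs one-byte cache descriptors into all four registers, but AL (the low byte of `abcd[0]`) holds a repeat count rather than a descriptor. The code skips it, together with the next byte of EAX, and scans the remaining 2 + 4 + 4 + 4 = 14 bytes. A sketch of the same traversal with the bookkeeping spelled out (`abcd` assumed in scope; a fully general decoder would also ignore any register whose bit 31 is set, a validity check this code omits):

```cpp
unsigned char *bytes = reinterpret_cast<unsigned char *>(abcd) + 2;
for (int i = 0; i < 14; ++i)
{
  unsigned char descriptor = bytes[i];
  if (descriptor == 0x00)
    continue; // 0x00 means "no information"
  // ... look descriptor up in the switch table above ...
}
```
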
| ykuroda | 0:13a5d365ba16 | 898 | |
| ykuroda | 0:13a5d365ba16 | 899 | inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs) |
| ykuroda | 0:13a5d365ba16 | 900 | { |
| ykuroda | 0:13a5d365ba16 | 901 | if(max_std_funcs>=4) |
| ykuroda | 0:13a5d365ba16 | 902 | queryCacheSizes_intel_direct(l1,l2,l3); |
| ykuroda | 0:13a5d365ba16 | 903 | else |
| ykuroda | 0:13a5d365ba16 | 904 | queryCacheSizes_intel_codes(l1,l2,l3); |
| ykuroda | 0:13a5d365ba16 | 905 | } |
| ykuroda | 0:13a5d365ba16 | 906 | |
| ykuroda | 0:13a5d365ba16 | 907 | inline void queryCacheSizes_amd(int& l1, int& l2, int& l3) |
| ykuroda | 0:13a5d365ba16 | 908 | { |
| ykuroda | 0:13a5d365ba16 | 909 | int abcd[4]; |
| ykuroda | 0:13a5d365ba16 | 910 | abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; |
| ykuroda | 0:13a5d365ba16 | 911 | EIGEN_CPUID(abcd,0x80000005,0); |
| ykuroda | 0:13a5d365ba16 | 912 | l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB |
| ykuroda | 0:13a5d365ba16 | 913 | abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; |
| ykuroda | 0:13a5d365ba16 | 914 | EIGEN_CPUID(abcd,0x80000006,0); |
| ykuroda | 0:13a5d365ba16 | 915 | l2 = (abcd[2] >> 16) * 1024; // C[31:16] = L2 cache size in KB |
| ykuroda | 0:13a5d365ba16 | 916 | l3 = ((abcd[3] & 0xFFFC0000) >> 18) * 512 * 1024; // D[31:18] = L3 cache size in units of 512 KB |
| ykuroda | 0:13a5d365ba16 | 917 | } |
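
Unlike the Intel paths, the AMD extended leaves report sizes directly: leaf 0x80000005 gives the L1 data cache size in KB in the top byte of ECX, and leaf 0x80000006 gives L2 in KB in ECX[31:16] and L3 in 512 KB units in EDX[31:18]. An illustrative decode with made-up register values:

```cpp
#include <cassert>

int main()
{
  int ecx = 0x02004142;                              // hypothetical leaf-0x80000006 ECX
  int l2  = (ecx >> 16) * 1024;                      // ECX[31:16] = 0x0200 = 512 KB
  int edx = 16 << 18;                                // hypothetical EDX with field value 16
  int l3  = ((edx & 0xFFFC0000) >> 18) * 512 * 1024; // 16 * 512 KB = 8 MB
  assert(l2 == 512 * 1024 && l3 == 8 * 1024 * 1024);
  return 0;
}
```
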
| ykuroda | 0:13a5d365ba16 | 918 | #endif |
| ykuroda | 0:13a5d365ba16 | 919 | |
| ykuroda | 0:13a5d365ba16 | 920 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 921 | * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */ |
| ykuroda | 0:13a5d365ba16 | 922 | inline void queryCacheSizes(int& l1, int& l2, int& l3) |
| ykuroda | 0:13a5d365ba16 | 923 | { |
| ykuroda | 0:13a5d365ba16 | 924 | #ifdef EIGEN_CPUID |
| ykuroda | 0:13a5d365ba16 | 925 | int abcd[4]; |
| ykuroda | 0:13a5d365ba16 | 926 | const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e}; |
| ykuroda | 0:13a5d365ba16 | 927 | const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163}; |
| ykuroda | 0:13a5d365ba16 | 928 | const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!" |
| ykuroda | 0:13a5d365ba16 | 929 | |
| ykuroda | 0:13a5d365ba16 | 930 | // identify the CPU vendor |
| ykuroda | 0:13a5d365ba16 | 931 | EIGEN_CPUID(abcd,0x0,0); |
| ykuroda | 0:13a5d365ba16 | 932 | int max_std_funcs = abcd[0]; // leaf 0 returns the highest supported standard function in EAX |
| ykuroda | 0:13a5d365ba16 | 933 | if(cpuid_is_vendor(abcd,GenuineIntel)) |
| ykuroda | 0:13a5d365ba16 | 934 | queryCacheSizes_intel(l1,l2,l3,max_std_funcs); |
| ykuroda | 0:13a5d365ba16 | 935 | else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_)) |
| ykuroda | 0:13a5d365ba16 | 936 | queryCacheSizes_amd(l1,l2,l3); |
| ykuroda | 0:13a5d365ba16 | 937 | else |
| ykuroda | 0:13a5d365ba16 | 938 | // by default let's use Intel's API |
| ykuroda | 0:13a5d365ba16 | 939 | queryCacheSizes_intel(l1,l2,l3,max_std_funcs); |
| ykuroda | 0:13a5d365ba16 | 940 | |
| ykuroda | 0:13a5d365ba16 | 941 | // other known vendor strings, currently unhandled (cpuid_is_vendor() would need them in packed-int form): |
| ykuroda | 0:13a5d365ba16 | 942 | // ||cpuid_is_vendor(abcd,"VIA VIA VIA ") |
| ykuroda | 0:13a5d365ba16 | 943 | // ||cpuid_is_vendor(abcd,"CyrixInstead") |
| ykuroda | 0:13a5d365ba16 | 944 | // ||cpuid_is_vendor(abcd,"CentaurHauls") |
| ykuroda | 0:13a5d365ba16 | 945 | // ||cpuid_is_vendor(abcd,"GenuineTMx86") |
| ykuroda | 0:13a5d365ba16 | 946 | // ||cpuid_is_vendor(abcd,"TransmetaCPU") |
| ykuroda | 0:13a5d365ba16 | 947 | // ||cpuid_is_vendor(abcd,"RiseRiseRise") |
| ykuroda | 0:13a5d365ba16 | 948 | // ||cpuid_is_vendor(abcd,"Geode by NSC") |
| ykuroda | 0:13a5d365ba16 | 949 | // ||cpuid_is_vendor(abcd,"SiS SiS SiS ") |
| ykuroda | 0:13a5d365ba16 | 950 | // ||cpuid_is_vendor(abcd,"UMC UMC UMC ") |
| ykuroda | 0:13a5d365ba16 | 951 | // ||cpuid_is_vendor(abcd,"NexGenDriven") |
| ykuroda | 0:13a5d365ba16 | 952 | #else |
| ykuroda | 0:13a5d365ba16 | 953 | l1 = l2 = l3 = -1; |
| ykuroda | 0:13a5d365ba16 | 954 | #endif |
| ykuroda | 0:13a5d365ba16 | 955 | } |
| ykuroda | 0:13a5d365ba16 | 956 | |
| ykuroda | 0:13a5d365ba16 | 957 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 958 | * \returns the size in Bytes of the L1 data cache */ |
| ykuroda | 0:13a5d365ba16 | 959 | inline int queryL1CacheSize() |
| ykuroda | 0:13a5d365ba16 | 960 | { |
| ykuroda | 0:13a5d365ba16 | 961 | int l1(-1), l2, l3; |
| ykuroda | 0:13a5d365ba16 | 962 | queryCacheSizes(l1,l2,l3); |
| ykuroda | 0:13a5d365ba16 | 963 | return l1; |
| ykuroda | 0:13a5d365ba16 | 964 | } |
| ykuroda | 0:13a5d365ba16 | 965 | |
| ykuroda | 0:13a5d365ba16 | 966 | /** \internal |
| ykuroda | 0:13a5d365ba16 | 967 | * \returns the size in Bytes of the L3 cache if present, and of the L2 cache otherwise */ |
| ykuroda | 0:13a5d365ba16 | 968 | inline int queryTopLevelCacheSize() |
| ykuroda | 0:13a5d365ba16 | 969 | { |
| ykuroda | 0:13a5d365ba16 | 970 | int l1, l2(-1), l3(-1); |
| ykuroda | 0:13a5d365ba16 | 971 | queryCacheSizes(l1,l2,l3); |
| ykuroda | 0:13a5d365ba16 | 972 | return (std::max)(l2,l3); |
| ykuroda | 0:13a5d365ba16 | 973 | } |
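
These helpers live in Eigen's `internal` namespace, so none of this is a stable public API; still, they can be handy for diagnostics. A minimal usage sketch (the printf harness is our own, not part of Eigen):

```cpp
#include <Eigen/Core>
#include <cstdio>

int main()
{
  int l1 = 0, l2 = 0, l3 = 0;
  Eigen::internal::queryCacheSizes(l1, l2, l3);
  std::printf("L1: %d B, L2: %d B, L3: %d B\n", l1, l2, l3);
  std::printf("top-level: %d B\n", Eigen::internal::queryTopLevelCacheSize());
  return 0; // -1 values mean EIGEN_CPUID was unavailable on this target
}
```
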
| ykuroda | 0:13a5d365ba16 | 974 | |
| ykuroda | 0:13a5d365ba16 | 975 | } // end namespace internal |
| ykuroda | 0:13a5d365ba16 | 976 | |
| ykuroda | 0:13a5d365ba16 | 977 | } // end namespace Eigen |
| ykuroda | 0:13a5d365ba16 | 978 | |
| ykuroda | 0:13a5d365ba16 | 979 | #endif // EIGEN_MEMORY_H |