RTC set to true
platform/mbed_alloc_wrappers.cpp@2:7aab896b1a3b, 2019-03-13 (annotated)
- Committer:
- kevman
- Date:
- Wed Mar 13 11:03:24 2019 +0000
- Revision:
- 2:7aab896b1a3b
- Parent:
- 0:38ceb79fef03
2019-03-13
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
kevman | 0:38ceb79fef03 | 1 | /* mbed Microcontroller Library |
kevman | 0:38ceb79fef03 | 2 | * Copyright (c) 2006-2016 ARM Limited |
kevman | 0:38ceb79fef03 | 3 | * |
kevman | 0:38ceb79fef03 | 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
kevman | 0:38ceb79fef03 | 5 | * you may not use this file except in compliance with the License. |
kevman | 0:38ceb79fef03 | 6 | * You may obtain a copy of the License at |
kevman | 0:38ceb79fef03 | 7 | * |
kevman | 0:38ceb79fef03 | 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
kevman | 0:38ceb79fef03 | 9 | * |
kevman | 0:38ceb79fef03 | 10 | * Unless required by applicable law or agreed to in writing, software |
kevman | 0:38ceb79fef03 | 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
kevman | 0:38ceb79fef03 | 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
kevman | 0:38ceb79fef03 | 13 | * See the License for the specific language governing permissions and |
kevman | 0:38ceb79fef03 | 14 | * limitations under the License. |
kevman | 0:38ceb79fef03 | 15 | */ |
kevman | 0:38ceb79fef03 | 16 | |
kevman | 0:38ceb79fef03 | 17 | #include "platform/mbed_mem_trace.h" |
kevman | 0:38ceb79fef03 | 18 | #include "platform/mbed_stats.h" |
kevman | 0:38ceb79fef03 | 19 | #include "platform/mbed_toolchain.h" |
kevman | 0:38ceb79fef03 | 20 | #include "platform/SingletonPtr.h" |
kevman | 0:38ceb79fef03 | 21 | #include "platform/PlatformMutex.h" |
kevman | 0:38ceb79fef03 | 22 | #include <stddef.h> |
kevman | 0:38ceb79fef03 | 23 | #include <stdio.h> |
kevman | 0:38ceb79fef03 | 24 | #include <string.h> |
kevman | 0:38ceb79fef03 | 25 | #include <stdlib.h> |
kevman | 0:38ceb79fef03 | 26 | |
kevman | 0:38ceb79fef03 | 27 | /* There are two memory tracers in mbed OS: |
kevman | 0:38ceb79fef03 | 28 | |
kevman | 0:38ceb79fef03 | 29 | - the first can be used to detect the maximum heap usage at runtime. It is |
kevman | 0:38ceb79fef03 | 30 | activated by defining the MBED_HEAP_STATS_ENABLED macro. |
kevman | 0:38ceb79fef03 | 31 | - the second can be used to trace each memory call by automatically invoking |
kevman | 0:38ceb79fef03 | 32 | a callback on each memory operation (see hal/api/mbed_mem_trace.h). It is |
kevman | 0:38ceb79fef03 | 33 | activated by setting the configuration option MBED_MEM_TRACING_ENABLED to true. |
kevman | 0:38ceb79fef03 | 34 | |
kevman | 0:38ceb79fef03 | 35 | Both tracers can be activated and deactivated in any combination. If both tracers |
kevman | 0:38ceb79fef03 | 36 | are active, the second one (MBED_MEM_TRACING_ENABLED) will trace the first one's |
kevman | 0:38ceb79fef03 | 37 | (MBED_HEAP_STATS_ENABLED) memory calls.*/ |
kevman | 0:38ceb79fef03 | 38 | |
kevman | 0:38ceb79fef03 | 39 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 40 | /* Implementation of the runtime max heap usage checker */ |
kevman | 0:38ceb79fef03 | 41 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 42 | |
/* Bookkeeping header prepended to every allocation made while
 * MBED_HEAP_STATS_ENABLED is set. free_wrapper() uses the signature to
 * recognise blocks that were allocated through the stats wrappers. */
typedef struct {
    uint32_t size;      // user-requested size in bytes (excludes this header)
    uint32_t signature; // MBED_HEAP_STATS_SIGNATURE while the block is live
} alloc_info_t;
kevman | 0:38ceb79fef03 | 47 | |
#ifdef MBED_HEAP_STATS_ENABLED
// Magic value marking a live stats-tracked allocation; cleared on free.
#define MBED_HEAP_STATS_SIGNATURE (0xdeadbeef)

// Serialises all updates/reads of heap_stats across the wrappers below.
static SingletonPtr<PlatformMutex> malloc_stats_mutex;
static mbed_stats_heap_t heap_stats = {0, 0, 0, 0, 0, 0, 0};

// Mirror of the allocator's internal chunk header, used to recover the true
// total size (user size + allocator overhead) of an allocation.
// NOTE(review): assumes the underlying allocator stores the chunk size in the
// word immediately before the returned pointer, with bit 0 used as a flag --
// confirm against the toolchain's heap implementation.
typedef struct {
    size_t size;
}mbed_heap_overhead_t;

#define MALLOC_HEADER_SIZE (sizeof(mbed_heap_overhead_t))
#define MALLOC_HEADER_PTR(p) (mbed_heap_overhead_t *)((char *)(p) - MALLOC_HEADER_SIZE)
// Mask off the low flag bit to get the chunk's total size in bytes.
#define MALLOC_HEAP_TOTAL_SIZE(p) (((p)->size) & (~0x1))
#endif
kevman | 0:38ceb79fef03 | 62 | |
kevman | 0:38ceb79fef03 | 63 | void mbed_stats_heap_get(mbed_stats_heap_t *stats) |
kevman | 0:38ceb79fef03 | 64 | { |
kevman | 0:38ceb79fef03 | 65 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 66 | extern uint32_t mbed_heap_size; |
kevman | 0:38ceb79fef03 | 67 | heap_stats.reserved_size = mbed_heap_size; |
kevman | 0:38ceb79fef03 | 68 | |
kevman | 0:38ceb79fef03 | 69 | malloc_stats_mutex->lock(); |
kevman | 0:38ceb79fef03 | 70 | memcpy(stats, &heap_stats, sizeof(mbed_stats_heap_t)); |
kevman | 0:38ceb79fef03 | 71 | malloc_stats_mutex->unlock(); |
kevman | 0:38ceb79fef03 | 72 | #else |
kevman | 0:38ceb79fef03 | 73 | memset(stats, 0, sizeof(mbed_stats_heap_t)); |
kevman | 0:38ceb79fef03 | 74 | #endif |
kevman | 0:38ceb79fef03 | 75 | } |
kevman | 0:38ceb79fef03 | 76 | |
kevman | 0:38ceb79fef03 | 77 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 78 | /* GCC memory allocation wrappers */ |
kevman | 0:38ceb79fef03 | 79 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 80 | |
kevman | 0:38ceb79fef03 | 81 | #if defined(TOOLCHAIN_GCC) |
kevman | 0:38ceb79fef03 | 82 | |
// Prototypes for the real newlib reentrant allocator functions. The linker's
// --wrap option renames the original _malloc_r etc. to __real__malloc_r so
// the wrappers below can delegate to them.
extern "C" {
    void *__real__malloc_r(struct _reent *r, size_t size);
    void *__real__memalign_r(struct _reent *r, size_t alignment, size_t bytes);
    void *__real__realloc_r(struct _reent *r, void *ptr, size_t size);
    void __real__free_r(struct _reent *r, void *ptr);
    void *__real__calloc_r(struct _reent *r, size_t nmemb, size_t size);
    // Shared implementations that take the caller address explicitly so the
    // trace output points at the application call site.
    void *malloc_wrapper(struct _reent *r, size_t size, void *caller);
    void free_wrapper(struct _reent *r, void *ptr, void *caller);
}
kevman | 0:38ceb79fef03 | 92 | |
kevman | 0:38ceb79fef03 | 93 | |
// Linker-injected (--wrap=_malloc_r) replacement for newlib's _malloc_r.
// Forwards to malloc_wrapper, capturing the caller's address for tracing.
extern "C" void *__wrap__malloc_r(struct _reent *r, size_t size)
{
    return malloc_wrapper(r, size, MBED_CALLER_ADDR());
}
kevman | 0:38ceb79fef03 | 98 | |
kevman | 0:38ceb79fef03 | 99 | extern "C" void *malloc_wrapper(struct _reent *r, size_t size, void *caller) |
kevman | 0:38ceb79fef03 | 100 | { |
kevman | 0:38ceb79fef03 | 101 | void *ptr = NULL; |
kevman | 0:38ceb79fef03 | 102 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 103 | mbed_mem_trace_lock(); |
kevman | 0:38ceb79fef03 | 104 | #endif |
kevman | 0:38ceb79fef03 | 105 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 106 | malloc_stats_mutex->lock(); |
kevman | 0:38ceb79fef03 | 107 | alloc_info_t *alloc_info = (alloc_info_t *)__real__malloc_r(r, size + sizeof(alloc_info_t)); |
kevman | 0:38ceb79fef03 | 108 | if (alloc_info != NULL) { |
kevman | 0:38ceb79fef03 | 109 | alloc_info->size = size; |
kevman | 0:38ceb79fef03 | 110 | alloc_info->signature = MBED_HEAP_STATS_SIGNATURE; |
kevman | 0:38ceb79fef03 | 111 | ptr = (void *)(alloc_info + 1); |
kevman | 0:38ceb79fef03 | 112 | heap_stats.current_size += size; |
kevman | 0:38ceb79fef03 | 113 | heap_stats.total_size += size; |
kevman | 0:38ceb79fef03 | 114 | heap_stats.alloc_cnt += 1; |
kevman | 0:38ceb79fef03 | 115 | if (heap_stats.current_size > heap_stats.max_size) { |
kevman | 0:38ceb79fef03 | 116 | heap_stats.max_size = heap_stats.current_size; |
kevman | 0:38ceb79fef03 | 117 | } |
kevman | 0:38ceb79fef03 | 118 | heap_stats.overhead_size += MALLOC_HEAP_TOTAL_SIZE(MALLOC_HEADER_PTR(alloc_info)) - size; |
kevman | 0:38ceb79fef03 | 119 | } else { |
kevman | 0:38ceb79fef03 | 120 | heap_stats.alloc_fail_cnt += 1; |
kevman | 0:38ceb79fef03 | 121 | } |
kevman | 0:38ceb79fef03 | 122 | malloc_stats_mutex->unlock(); |
kevman | 0:38ceb79fef03 | 123 | #else // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 124 | ptr = __real__malloc_r(r, size); |
kevman | 0:38ceb79fef03 | 125 | #endif // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 126 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 127 | mbed_mem_trace_malloc(ptr, size, caller); |
kevman | 0:38ceb79fef03 | 128 | mbed_mem_trace_unlock(); |
kevman | 0:38ceb79fef03 | 129 | #endif // #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 130 | return ptr; |
kevman | 0:38ceb79fef03 | 131 | } |
kevman | 0:38ceb79fef03 | 132 | |
kevman | 0:38ceb79fef03 | 133 | extern "C" void *__wrap__realloc_r(struct _reent *r, void *ptr, size_t size) |
kevman | 0:38ceb79fef03 | 134 | { |
kevman | 0:38ceb79fef03 | 135 | void *new_ptr = NULL; |
kevman | 0:38ceb79fef03 | 136 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 137 | mbed_mem_trace_lock(); |
kevman | 0:38ceb79fef03 | 138 | #endif |
kevman | 0:38ceb79fef03 | 139 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 140 | // Implement realloc_r with malloc and free. |
kevman | 0:38ceb79fef03 | 141 | // The function realloc_r can't be used here directly since |
kevman | 0:38ceb79fef03 | 142 | // it can call into __wrap__malloc_r (returns ptr + 4) or |
kevman | 0:38ceb79fef03 | 143 | // resize memory directly (returns ptr + 0). |
kevman | 0:38ceb79fef03 | 144 | |
kevman | 0:38ceb79fef03 | 145 | // Note - no lock needed since malloc and free are thread safe |
kevman | 0:38ceb79fef03 | 146 | |
kevman | 0:38ceb79fef03 | 147 | // Get old size |
kevman | 0:38ceb79fef03 | 148 | uint32_t old_size = 0; |
kevman | 0:38ceb79fef03 | 149 | if (ptr != NULL) { |
kevman | 0:38ceb79fef03 | 150 | alloc_info_t *alloc_info = ((alloc_info_t *)ptr) - 1; |
kevman | 0:38ceb79fef03 | 151 | old_size = alloc_info->size; |
kevman | 0:38ceb79fef03 | 152 | } |
kevman | 0:38ceb79fef03 | 153 | |
kevman | 0:38ceb79fef03 | 154 | // Allocate space |
kevman | 0:38ceb79fef03 | 155 | if (size != 0) { |
kevman | 0:38ceb79fef03 | 156 | new_ptr = malloc(size); |
kevman | 0:38ceb79fef03 | 157 | } |
kevman | 0:38ceb79fef03 | 158 | |
kevman | 0:38ceb79fef03 | 159 | // If the new buffer has been allocated copy the data to it |
kevman | 0:38ceb79fef03 | 160 | // and free the old buffer |
kevman | 0:38ceb79fef03 | 161 | if (new_ptr != NULL) { |
kevman | 0:38ceb79fef03 | 162 | uint32_t copy_size = (old_size < size) ? old_size : size; |
kevman | 0:38ceb79fef03 | 163 | memcpy(new_ptr, (void *)ptr, copy_size); |
kevman | 0:38ceb79fef03 | 164 | free(ptr); |
kevman | 0:38ceb79fef03 | 165 | } |
kevman | 0:38ceb79fef03 | 166 | #else // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 167 | new_ptr = __real__realloc_r(r, ptr, size); |
kevman | 0:38ceb79fef03 | 168 | #endif // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 169 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 170 | mbed_mem_trace_realloc(new_ptr, ptr, size, MBED_CALLER_ADDR()); |
kevman | 0:38ceb79fef03 | 171 | mbed_mem_trace_unlock(); |
kevman | 0:38ceb79fef03 | 172 | #endif // #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 173 | return new_ptr; |
kevman | 0:38ceb79fef03 | 174 | } |
kevman | 0:38ceb79fef03 | 175 | |
// Linker-injected (--wrap=_free_r) replacement for newlib's _free_r.
// Forwards to free_wrapper, capturing the caller's address for tracing.
extern "C" void __wrap__free_r(struct _reent *r, void *ptr)
{
    free_wrapper(r, ptr, MBED_CALLER_ADDR());
}
kevman | 0:38ceb79fef03 | 180 | |
/** Core free implementation behind __wrap__free_r.
 *
 * With MBED_HEAP_STATS_ENABLED, steps back to the alloc_info_t header and,
 * if the signature matches, updates heap_stats and frees the whole block
 * (header included). Blocks without the signature were allocated before the
 * wrappers were active (or by memalign) and are freed untouched.
 *
 * @param r      newlib reentrancy context.
 * @param ptr    User pointer to release; NULL is a no-op for the allocator.
 * @param caller Address of the call site, reported to the tracer.
 */
extern "C" void free_wrapper(struct _reent *r, void *ptr, void *caller)
{
#if MBED_MEM_TRACING_ENABLED
    mbed_mem_trace_lock();
#endif
#ifdef MBED_HEAP_STATS_ENABLED
    malloc_stats_mutex->lock();
    alloc_info_t *alloc_info = NULL;
    if (ptr != NULL) {
        // The header sits immediately in front of the user area.
        alloc_info = ((alloc_info_t *)ptr) - 1;
        if (MBED_HEAP_STATS_SIGNATURE == alloc_info->signature) {
            size_t user_size = alloc_info->size;
            size_t alloc_size = MALLOC_HEAP_TOTAL_SIZE(MALLOC_HEADER_PTR(alloc_info));
            // Clear the signature before freeing so a stale pointer cannot
            // be mistaken for a live tracked block.
            alloc_info->signature = 0x0;
            heap_stats.current_size -= user_size;
            heap_stats.alloc_cnt -= 1;
            heap_stats.overhead_size -= (alloc_size - user_size);
            __real__free_r(r, (void *)alloc_info);
        } else {
            // Untracked block: free the user pointer as-is.
            __real__free_r(r, ptr);
        }
    }

    malloc_stats_mutex->unlock();
#else // #ifdef MBED_HEAP_STATS_ENABLED
    __real__free_r(r, ptr);
#endif // #ifdef MBED_HEAP_STATS_ENABLED
#if MBED_MEM_TRACING_ENABLED
    mbed_mem_trace_free(ptr, caller);
    mbed_mem_trace_unlock();
#endif // #if MBED_MEM_TRACING_ENABLED
}
kevman | 0:38ceb79fef03 | 213 | |
kevman | 0:38ceb79fef03 | 214 | extern "C" void *__wrap__calloc_r(struct _reent *r, size_t nmemb, size_t size) |
kevman | 0:38ceb79fef03 | 215 | { |
kevman | 0:38ceb79fef03 | 216 | void *ptr = NULL; |
kevman | 0:38ceb79fef03 | 217 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 218 | mbed_mem_trace_lock(); |
kevman | 0:38ceb79fef03 | 219 | #endif |
kevman | 0:38ceb79fef03 | 220 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 221 | // Note - no lock needed since malloc is thread safe |
kevman | 0:38ceb79fef03 | 222 | |
kevman | 0:38ceb79fef03 | 223 | ptr = malloc(nmemb * size); |
kevman | 0:38ceb79fef03 | 224 | if (ptr != NULL) { |
kevman | 0:38ceb79fef03 | 225 | memset(ptr, 0, nmemb * size); |
kevman | 0:38ceb79fef03 | 226 | } |
kevman | 0:38ceb79fef03 | 227 | #else // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 228 | ptr = __real__calloc_r(r, nmemb, size); |
kevman | 0:38ceb79fef03 | 229 | #endif // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 230 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 231 | mbed_mem_trace_calloc(ptr, nmemb, size, MBED_CALLER_ADDR()); |
kevman | 0:38ceb79fef03 | 232 | mbed_mem_trace_unlock(); |
kevman | 0:38ceb79fef03 | 233 | #endif // #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 234 | return ptr; |
kevman | 0:38ceb79fef03 | 235 | } |
kevman | 0:38ceb79fef03 | 236 | |
// Pass-through wrapper for newlib's _memalign_r. Aligned allocations are NOT
// tracked in heap_stats or traced; free_wrapper handles them via the
// signature check (no alloc_info_t header is present).
extern "C" void *__wrap__memalign_r(struct _reent *r, size_t alignment, size_t bytes)
{
    return __real__memalign_r(r, alignment, bytes);
}
kevman | 0:38ceb79fef03 | 241 | |
kevman | 0:38ceb79fef03 | 242 | |
kevman | 0:38ceb79fef03 | 243 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 244 | /* ARMCC / IAR memory allocation wrappers */ |
kevman | 0:38ceb79fef03 | 245 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 246 | |
#elif defined(TOOLCHAIN_ARM) || defined(__ICCARM__)

// Map the $Sub$$/$Super$$ linker patching mechanism onto the toolchain's
// allocator entry points: ARM Compiler patches malloc/realloc/calloc/free
// directly, while IAR patches its dlmalloc implementation functions.
#if defined(TOOLCHAIN_ARM)
#define SUPER_MALLOC    $Super$$malloc
#define SUB_MALLOC      $Sub$$malloc
#define SUPER_REALLOC   $Super$$realloc
#define SUB_REALLOC     $Sub$$realloc
#define SUPER_CALLOC    $Super$$calloc
#define SUB_CALLOC      $Sub$$calloc
#define SUPER_FREE      $Super$$free
#define SUB_FREE        $Sub$$free
#elif defined(__ICCARM__)
#define SUPER_MALLOC    $Super$$__iar_dlmalloc
#define SUB_MALLOC      $Sub$$__iar_dlmalloc
#define SUPER_REALLOC   $Super$$__iar_dlrealloc
#define SUB_REALLOC     $Sub$$__iar_dlrealloc
#define SUPER_CALLOC    $Super$$__iar_dlcalloc
#define SUB_CALLOC      $Sub$$__iar_dlcalloc
#define SUPER_FREE      $Super$$__iar_dlfree
#define SUB_FREE        $Sub$$__iar_dlfree
#endif
kevman | 0:38ceb79fef03 | 268 | |
kevman | 0:38ceb79fef03 | 269 | /* Enable hooking of memory function only if tracing is also enabled */ |
kevman | 0:38ceb79fef03 | 270 | #if defined(MBED_MEM_TRACING_ENABLED) || defined(MBED_HEAP_STATS_ENABLED) |
kevman | 0:38ceb79fef03 | 271 | |
// Prototypes for the original ($Super$$) allocator functions and for the
// shared wrapper implementations that take an explicit caller address.
extern "C" {
    void *SUPER_MALLOC(size_t size);
    void *SUPER_REALLOC(void *ptr, size_t size);
    void *SUPER_CALLOC(size_t nmemb, size_t size);
    void SUPER_FREE(void *ptr);
    void *malloc_wrapper(size_t size, void *caller);
    void free_wrapper(void *ptr, void *caller);
}
kevman | 0:38ceb79fef03 | 280 | |
// $Sub$$ entry point that intercepts malloc; forwards to malloc_wrapper with
// the caller's address for tracing.
extern "C" void *SUB_MALLOC(size_t size)
{
    return malloc_wrapper(size, MBED_CALLER_ADDR());
}
kevman | 0:38ceb79fef03 | 285 | |
kevman | 0:38ceb79fef03 | 286 | extern "C" void *malloc_wrapper(size_t size, void *caller) |
kevman | 0:38ceb79fef03 | 287 | { |
kevman | 0:38ceb79fef03 | 288 | void *ptr = NULL; |
kevman | 0:38ceb79fef03 | 289 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 290 | mbed_mem_trace_lock(); |
kevman | 0:38ceb79fef03 | 291 | #endif |
kevman | 0:38ceb79fef03 | 292 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 293 | malloc_stats_mutex->lock(); |
kevman | 0:38ceb79fef03 | 294 | alloc_info_t *alloc_info = (alloc_info_t *)SUPER_MALLOC(size + sizeof(alloc_info_t)); |
kevman | 0:38ceb79fef03 | 295 | if (alloc_info != NULL) { |
kevman | 0:38ceb79fef03 | 296 | alloc_info->size = size; |
kevman | 0:38ceb79fef03 | 297 | alloc_info->signature = MBED_HEAP_STATS_SIGNATURE; |
kevman | 0:38ceb79fef03 | 298 | ptr = (void *)(alloc_info + 1); |
kevman | 0:38ceb79fef03 | 299 | heap_stats.current_size += size; |
kevman | 0:38ceb79fef03 | 300 | heap_stats.total_size += size; |
kevman | 0:38ceb79fef03 | 301 | heap_stats.alloc_cnt += 1; |
kevman | 0:38ceb79fef03 | 302 | if (heap_stats.current_size > heap_stats.max_size) { |
kevman | 0:38ceb79fef03 | 303 | heap_stats.max_size = heap_stats.current_size; |
kevman | 0:38ceb79fef03 | 304 | } |
kevman | 0:38ceb79fef03 | 305 | heap_stats.overhead_size += MALLOC_HEAP_TOTAL_SIZE(MALLOC_HEADER_PTR(alloc_info)) - size; |
kevman | 0:38ceb79fef03 | 306 | } else { |
kevman | 0:38ceb79fef03 | 307 | heap_stats.alloc_fail_cnt += 1; |
kevman | 0:38ceb79fef03 | 308 | } |
kevman | 0:38ceb79fef03 | 309 | malloc_stats_mutex->unlock(); |
kevman | 0:38ceb79fef03 | 310 | #else // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 311 | ptr = SUPER_MALLOC(size); |
kevman | 0:38ceb79fef03 | 312 | #endif // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 313 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 314 | mbed_mem_trace_malloc(ptr, size, caller); |
kevman | 0:38ceb79fef03 | 315 | mbed_mem_trace_unlock(); |
kevman | 0:38ceb79fef03 | 316 | #endif // #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 317 | return ptr; |
kevman | 0:38ceb79fef03 | 318 | } |
kevman | 0:38ceb79fef03 | 319 | |
kevman | 0:38ceb79fef03 | 320 | |
kevman | 0:38ceb79fef03 | 321 | extern "C" void *SUB_REALLOC(void *ptr, size_t size) |
kevman | 0:38ceb79fef03 | 322 | { |
kevman | 0:38ceb79fef03 | 323 | void *new_ptr = NULL; |
kevman | 0:38ceb79fef03 | 324 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 325 | mbed_mem_trace_lock(); |
kevman | 0:38ceb79fef03 | 326 | #endif |
kevman | 0:38ceb79fef03 | 327 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 328 | // Note - no lock needed since malloc and free are thread safe |
kevman | 0:38ceb79fef03 | 329 | |
kevman | 0:38ceb79fef03 | 330 | // Get old size |
kevman | 0:38ceb79fef03 | 331 | uint32_t old_size = 0; |
kevman | 0:38ceb79fef03 | 332 | if (ptr != NULL) { |
kevman | 0:38ceb79fef03 | 333 | alloc_info_t *alloc_info = ((alloc_info_t *)ptr) - 1; |
kevman | 0:38ceb79fef03 | 334 | old_size = alloc_info->size; |
kevman | 0:38ceb79fef03 | 335 | } |
kevman | 0:38ceb79fef03 | 336 | |
kevman | 0:38ceb79fef03 | 337 | // Allocate space |
kevman | 0:38ceb79fef03 | 338 | if (size != 0) { |
kevman | 0:38ceb79fef03 | 339 | new_ptr = malloc(size); |
kevman | 0:38ceb79fef03 | 340 | } |
kevman | 0:38ceb79fef03 | 341 | |
kevman | 0:38ceb79fef03 | 342 | // If the new buffer has been allocated copy the data to it |
kevman | 0:38ceb79fef03 | 343 | // and free the old buffer |
kevman | 0:38ceb79fef03 | 344 | if ((new_ptr != NULL) && (ptr != NULL)) { |
kevman | 0:38ceb79fef03 | 345 | uint32_t copy_size = (old_size < size) ? old_size : size; |
kevman | 0:38ceb79fef03 | 346 | memcpy(new_ptr, (void *)ptr, copy_size); |
kevman | 0:38ceb79fef03 | 347 | free(ptr); |
kevman | 0:38ceb79fef03 | 348 | } |
kevman | 0:38ceb79fef03 | 349 | #else // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 350 | new_ptr = SUPER_REALLOC(ptr, size); |
kevman | 0:38ceb79fef03 | 351 | #endif // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 352 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 353 | mbed_mem_trace_realloc(new_ptr, ptr, size, MBED_CALLER_ADDR()); |
kevman | 0:38ceb79fef03 | 354 | mbed_mem_trace_unlock(); |
kevman | 0:38ceb79fef03 | 355 | #endif // #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 356 | return new_ptr; |
kevman | 0:38ceb79fef03 | 357 | } |
kevman | 0:38ceb79fef03 | 358 | |
kevman | 0:38ceb79fef03 | 359 | extern "C" void *SUB_CALLOC(size_t nmemb, size_t size) |
kevman | 0:38ceb79fef03 | 360 | { |
kevman | 0:38ceb79fef03 | 361 | void *ptr = NULL; |
kevman | 0:38ceb79fef03 | 362 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 363 | mbed_mem_trace_lock(); |
kevman | 0:38ceb79fef03 | 364 | #endif |
kevman | 0:38ceb79fef03 | 365 | #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 366 | // Note - no lock needed since malloc is thread safe |
kevman | 0:38ceb79fef03 | 367 | ptr = malloc(nmemb * size); |
kevman | 0:38ceb79fef03 | 368 | if (ptr != NULL) { |
kevman | 0:38ceb79fef03 | 369 | memset(ptr, 0, nmemb * size); |
kevman | 0:38ceb79fef03 | 370 | } |
kevman | 0:38ceb79fef03 | 371 | #else // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 372 | ptr = SUPER_CALLOC(nmemb, size); |
kevman | 0:38ceb79fef03 | 373 | #endif // #ifdef MBED_HEAP_STATS_ENABLED |
kevman | 0:38ceb79fef03 | 374 | #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 375 | mbed_mem_trace_calloc(ptr, nmemb, size, MBED_CALLER_ADDR()); |
kevman | 0:38ceb79fef03 | 376 | mbed_mem_trace_unlock(); |
kevman | 0:38ceb79fef03 | 377 | #endif // #if MBED_MEM_TRACING_ENABLED |
kevman | 0:38ceb79fef03 | 378 | return ptr; |
kevman | 0:38ceb79fef03 | 379 | } |
kevman | 0:38ceb79fef03 | 380 | |
// $Sub$$ entry point that intercepts free; forwards to free_wrapper with the
// caller's address for tracing.
extern "C" void SUB_FREE(void *ptr)
{
    free_wrapper(ptr, MBED_CALLER_ADDR());
}
kevman | 0:38ceb79fef03 | 385 | |
/** Core free implementation behind SUB_FREE (ARM Compiler / IAR).
 *
 * With MBED_HEAP_STATS_ENABLED, steps back to the alloc_info_t header and,
 * if the signature matches, updates heap_stats and frees the whole block
 * (header included). Blocks without the signature were allocated before the
 * wrappers were active and are freed untouched.
 *
 * @param ptr    User pointer to release; NULL is a no-op for the allocator.
 * @param caller Address of the call site, reported to the tracer.
 */
extern "C" void free_wrapper(void *ptr, void *caller)
{
#if MBED_MEM_TRACING_ENABLED
    mbed_mem_trace_lock();
#endif
#ifdef MBED_HEAP_STATS_ENABLED
    malloc_stats_mutex->lock();
    alloc_info_t *alloc_info = NULL;
    if (ptr != NULL) {
        // The header sits immediately in front of the user area.
        alloc_info = ((alloc_info_t *)ptr) - 1;
        if (MBED_HEAP_STATS_SIGNATURE == alloc_info->signature) {
            size_t user_size = alloc_info->size;
            size_t alloc_size = MALLOC_HEAP_TOTAL_SIZE(MALLOC_HEADER_PTR(alloc_info));
            // Clear the signature before freeing so a stale pointer cannot
            // be mistaken for a live tracked block.
            alloc_info->signature = 0x0;
            heap_stats.current_size -= user_size;
            heap_stats.alloc_cnt -= 1;
            heap_stats.overhead_size -= (alloc_size - user_size);
            SUPER_FREE((void *)alloc_info);
        } else {
            // Untracked block: free the user pointer as-is.
            SUPER_FREE(ptr);
        }
    }

    malloc_stats_mutex->unlock();
#else // #ifdef MBED_HEAP_STATS_ENABLED
    SUPER_FREE(ptr);
#endif // #ifdef MBED_HEAP_STATS_ENABLED
#if MBED_MEM_TRACING_ENABLED
    mbed_mem_trace_free(ptr, caller);
    mbed_mem_trace_unlock();
#endif // #if MBED_MEM_TRACING_ENABLED
}
kevman | 0:38ceb79fef03 | 418 | |
kevman | 0:38ceb79fef03 | 419 | #endif // #if defined(MBED_MEM_TRACING_ENABLED) || defined(MBED_HEAP_STATS_ENABLED) |
kevman | 0:38ceb79fef03 | 420 | |
kevman | 0:38ceb79fef03 | 421 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 422 | /* Allocation wrappers for other toolchains are not supported yet */ |
kevman | 0:38ceb79fef03 | 423 | /******************************************************************************/ |
kevman | 0:38ceb79fef03 | 424 | |
#else

// No wrapping mechanism is implemented for this toolchain: fail the build
// loudly if either feature was requested rather than silently producing
// untracked allocations.
#if MBED_MEM_TRACING_ENABLED
#error Memory tracing is not supported with the current toolchain.
#endif

#ifdef MBED_HEAP_STATS_ENABLED
#error Heap statistics are not supported with the current toolchain.
#endif

#endif // #if defined(TOOLCHAIN_GCC)