Important changes to repositories hosted on mbed.com
Mbed hosted mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software download the repository Zip archive or clone locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Dependents: TYBLE16_simple_data_logger TYBLE16_MP3_Air
nsdynmemLIB.c
/*
 * Copyright (c) 2014-2019 ARM Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdint.h>
#include <string.h>
#include "nsdynmemLIB.h"
#include "platform/arm_hal_interrupt.h"
#include <stdlib.h>
#include "ns_list.h"

#ifndef STANDARD_MALLOC
// Event types reported to dev_stat_update() for the optional statistics block.
typedef enum mem_stat_update_t {
    DEV_HEAP_ALLOC_OK,
    DEV_HEAP_ALLOC_FAIL,
    DEV_HEAP_FREE,
} mem_stat_update_t;

// Free-hole descriptor. It is stored *inside* the free block itself (just
// after the leading size word - see hole_from_block_start) and linked into
// ns_mem_book::holes_list.
typedef struct {
    ns_list_link_t link;
} hole_t;

typedef int ns_mem_word_size_t; // internal signed heap block size type

// Amount of memory regions
#define REGION_COUNT 3

/* struct for book keeping variables */
struct ns_mem_book {
    ns_mem_word_size_t *heap_main[REGION_COUNT];      // start of each region's word array (0 = unused slot)
    ns_mem_word_size_t *heap_main_end[REGION_COUNT];  // pointer to each region's final (trailing size) word
    mem_stat_t *mem_stat_info_ptr;                    // optional statistics block, NULL if not used
    void (*heap_failure_callback)(heap_fail_t);       // user callback invoked on heap errors
    NS_LIST_HEAD(hole_t, link) holes_list;            // free holes, kept in ascending address order
    ns_mem_heap_size_t heap_size;
    ns_mem_heap_size_t temporary_alloc_heap_limit; /* Amount of reserved heap temporary alloc can't exceed */
};

static ns_mem_book_t *default_book; // heap pointer for original "ns_" API use

// size of a hole_t in our word units (rounded up to a whole word)
#define HOLE_T_SIZE ((ns_mem_word_size_t) ((sizeof(hole_t) + sizeof(ns_mem_word_size_t) - 1) / sizeof(ns_mem_word_size_t)))

#define TEMPORARY_ALLOC_FREE_HEAP_THRESHOLD 5 /* temporary allocations must leave 5% of the heap free */

// A hole's descriptor lives one word past the block start (after the size word).
static NS_INLINE hole_t *hole_from_block_start(ns_mem_word_size_t *start)
{
    return (hole_t *)(start + 1);
}

// Inverse of hole_from_block_start(): recover the block's leading size word.
static NS_INLINE ns_mem_word_size_t *block_start_from_hole(hole_t *start)
{
    return ((ns_mem_word_size_t *)start) - 1;
}

// Report a heap error through the registered callback, if any.
static void heap_failure(ns_mem_book_t *book, heap_fail_t reason)
{
    if (book->heap_failure_callback) {
        book->heap_failure_callback(reason);
    }
}

// Find the region that fully contains the block [block_ptr, block_ptr + size].
// Returns the region index, or -1 when the pointer is not inside any region.
static int ns_dyn_mem_region_find(ns_mem_book_t *book, ns_mem_word_size_t *block_ptr, ns_mem_word_size_t size)
{
    int index;
    for (index = 0; index < REGION_COUNT; index++) {
        if (book->heap_main[index] != 0) {
            if ((block_ptr >= book->heap_main[index]) &&
                    (block_ptr < book->heap_main_end[index]) &&
                    ((block_ptr + size) < book->heap_main_end[index])) {
                return index;
            }
        }
    }

    return -1;
}

// Record a new region in the first unused slot. Slot 0 is reserved for the
// initial heap set up by ns_mem_init(). Returns 0 on success, -1 when all
// REGION_COUNT slots are in use.
static int ns_dyn_mem_region_save(ns_mem_book_t *book, ns_mem_word_size_t *region_start_ptr, ns_mem_word_size_t region_size)
{
    for (int i = 1; i < REGION_COUNT; i++) {
        if (book->heap_main[i] == 0) {
            book->heap_main[i] = region_start_ptr;
            book->heap_main_end[i] = book->heap_main[i] + region_size;
            return 0;
        }
    }

    return -1;
}


#endif //STANDARD_MALLOC

// Initialize the default book used by the legacy "ns_dyn_" API.
void ns_dyn_mem_init(void *heap, ns_mem_heap_size_t h_size,
                     void (*passed_fptr)(heap_fail_t), mem_stat_t *info_ptr)
{
    default_book = ns_mem_init(heap, h_size, passed_fptr, info_ptr);
}

// Add an extra memory region to the default book (see ns_mem_region_add).
int ns_dyn_mem_region_add(void *region_ptr, ns_mem_heap_size_t region_size)
{
    return ns_mem_region_add(default_book, region_ptr, region_size);
}

// Return the statistics block of the default book, or NULL when the library
// is built with STANDARD_MALLOC.
const mem_stat_t *ns_dyn_mem_get_mem_stat(void)
{
#ifndef STANDARD_MALLOC
    return ns_mem_get_mem_stat(default_book);
#else
    return NULL;
#endif
}

// Initialize a heap: the book-keeping struct is placed at the (word-aligned)
// start of the caller's buffer and the remaining space becomes one free block.
// Blocks are stored as: size word | data | size word, with a negative size
// meaning "free". Returns the book pointer (aliases the heap buffer).
ns_mem_book_t *ns_mem_init(void *heap, ns_mem_heap_size_t h_size,
                           void (*passed_fptr)(heap_fail_t),
                           mem_stat_t *info_ptr)
{
#ifndef STANDARD_MALLOC
    ns_mem_book_t *book;

    ns_mem_word_size_t *ptr;
    ns_mem_word_size_t temp_int;
    /* Do memory alignment */
    temp_int = ((uintptr_t)heap % sizeof(ns_mem_word_size_t));
    if (temp_int) {
        heap = (uint8_t *) heap + (sizeof(ns_mem_word_size_t) - temp_int);
        h_size -= (sizeof(ns_mem_word_size_t) - temp_int);
    }

    /* Make correction for total length also */
    temp_int = (h_size % sizeof(ns_mem_word_size_t));
    if (temp_int) {
        h_size -= (sizeof(ns_mem_word_size_t) - temp_int);
    }

    // The book itself lives at the start of the heap buffer.
    book = heap;
    memset(book->heap_main, 0, REGION_COUNT * sizeof(ns_mem_word_size_t *));
    memset(book->heap_main_end, 0, REGION_COUNT * sizeof(ns_mem_word_size_t *));

    book->heap_main[0] = (ns_mem_word_size_t *) & (book[1]); // SET Heap Pointer
    book->heap_size = h_size - sizeof(ns_mem_book_t); //Set Heap Size
    temp_int = (book->heap_size / sizeof(ns_mem_word_size_t));
    temp_int -= 2; // reserve the leading and trailing size words
    ptr = book->heap_main[0];
    *ptr = -(temp_int);            // negative size marks the block as free
    ptr += (temp_int + 1);
    *ptr = -(temp_int);            // matching trailing size word
    book->heap_main_end[0] = ptr;

    ns_list_init(&book->holes_list);
    ns_list_add_to_start(&book->holes_list, hole_from_block_start(book->heap_main[0]));

    book->mem_stat_info_ptr = info_ptr;
    // Reset memory statistics and record the usable heap size.
    if (info_ptr) {
        memset(book->mem_stat_info_ptr, 0, sizeof(mem_stat_t));
        book->mem_stat_info_ptr->heap_sector_size = book->heap_size;
    }
    // By default temporary allocations must leave 5% of the heap free.
    book->temporary_alloc_heap_limit = book->heap_size / 100 * (100 - TEMPORARY_ALLOC_FREE_HEAP_THRESHOLD);
#endif
    //There really is no support to standard malloc in this library anymore
    book->heap_failure_callback = passed_fptr;

    return book;
}

// Attach an additional memory region to an existing book. The region is
// aligned, formatted as one free block, inserted into the holes list in
// address order, and recorded in the region table. Returns 0 on success,
// -1 on bad arguments, -2 if the region is already in the list, -3 when no
// region slot is free.
int ns_mem_region_add(ns_mem_book_t *book, void *region_ptr, ns_mem_heap_size_t region_size)
{
#ifndef STANDARD_MALLOC
    if (!book || !region_ptr || region_size < 3 * sizeof(ns_mem_word_size_t)) {
        return -1;
    }

    ns_mem_word_size_t *block_ptr;
    ns_mem_word_size_t temp_int;

    /* Do memory alignment */
    temp_int = ((uintptr_t)region_ptr % sizeof(ns_mem_word_size_t));
    if (temp_int) {
        region_ptr = (uint8_t *) region_ptr + (sizeof(ns_mem_word_size_t) - temp_int);
        region_size -= (sizeof(ns_mem_word_size_t) - temp_int);
    }

    /* Make correction for total length */
    temp_int = (region_size % sizeof(ns_mem_word_size_t));
    if (temp_int) {
        region_size -= (sizeof(ns_mem_word_size_t) - temp_int);
    }

    // Create hole from new heap memory
    temp_int = (region_size / sizeof(ns_mem_word_size_t));
    temp_int -= 2;
    block_ptr = region_ptr;
    *block_ptr = -(temp_int);
    block_ptr += (temp_int + 1); // now block_ptr points to end of block
    *block_ptr = -(temp_int);

    // find place for the new hole from the holes list
    hole_t *hole_to_add = hole_from_block_start(region_ptr);
    hole_t *previous_hole = NULL;
    ns_list_foreach(hole_t, hole_in_list_ptr, &book->holes_list) {
        if (hole_in_list_ptr < hole_to_add) {
            previous_hole = hole_in_list_ptr;
        } else if (hole_in_list_ptr == hole_to_add) {
            // trying to add memory block that is already in the list!
            return -2;
        }
    }

    // save region
    if (ns_dyn_mem_region_save(book, region_ptr, (region_size / (sizeof(ns_mem_word_size_t))) - 1) != 0) {
        return -3;
    }

    // Add new hole to the list
    if (previous_hole) {
        ns_list_add_after(&book->holes_list, previous_hole, hole_to_add);
    } else {
        ns_list_add_to_start(&book->holes_list, hole_to_add);
    }

    // adjust total heap size with new hole
    book->heap_size += region_size;

    if (book->mem_stat_info_ptr) {
        book->mem_stat_info_ptr->heap_sector_size = book->heap_size;
    }

    // adjust temporary allocation limits to match new heap
    book->temporary_alloc_heap_limit = book->heap_size / 100 * (100 - TEMPORARY_ALLOC_FREE_HEAP_THRESHOLD);

    return 0;
#else
    (void) book;
    (void) region_ptr;
    (void) region_size;

    return -1;
#endif
}

// Return the heap's statistics block (NULL when stats are disabled or when
// built with STANDARD_MALLOC).
const mem_stat_t *ns_mem_get_mem_stat(ns_mem_book_t *heap)
{
#ifndef STANDARD_MALLOC
    return heap->mem_stat_info_ptr;
#else
    return NULL;
#endif
}

// Configure how much heap temporary allocations may consume, either as an
// absolute free amount (< half the heap) or a percentage (< 50%). Passing
// 0/0 disables the limit (whole heap usable). Requires mem stats to be
// enabled. Returns 0 on success, -1 without book/stats, -2 for illegal
// parameters, -3 when built with STANDARD_MALLOC.
int ns_mem_set_temporary_alloc_free_heap_threshold(ns_mem_book_t *book, uint8_t free_heap_percentage, ns_mem_heap_size_t free_heap_amount)
{
#ifndef STANDARD_MALLOC
    ns_mem_heap_size_t heap_limit = 0;

    if (!book || !book->mem_stat_info_ptr) {
        // no book or mem_stats
        return -1;
    }

    // Absolute amount takes precedence over percentage.
    if (free_heap_amount && free_heap_amount < book->heap_size / 2) {
        heap_limit = book->heap_size - free_heap_amount;
    }

    if (!free_heap_amount && free_heap_percentage && free_heap_percentage < 50) {
        heap_limit = book->heap_size / 100 * (100 - free_heap_percentage);
    }

    if (free_heap_amount == 0 && free_heap_percentage == 0) {
        // feature disabled, allow whole heap to be reserved by temporary alloc
        heap_limit = book->heap_size;
    }

    if (heap_limit == 0) {
        // illegal heap parameters
        return -2;
    }

    book->temporary_alloc_heap_limit = heap_limit;

    return 0;
#else
    return -3;
#endif
}

// Legacy wrapper operating on the default book.
extern int ns_dyn_mem_set_temporary_alloc_free_heap_threshold(uint8_t free_heap_percentage, ns_mem_heap_size_t free_heap_amount)
{
    return ns_mem_set_temporary_alloc_free_heap_threshold(default_book, free_heap_percentage, free_heap_amount);
}

#ifndef STANDARD_MALLOC
// Update the optional statistics block for an alloc/free event.
// `size` is the total byte count of the affected block (data + 2 size words).
static void dev_stat_update(mem_stat_t *mem_stat_info_ptr, mem_stat_update_t type, ns_mem_block_size_t size)
{
    if (mem_stat_info_ptr) {
        switch (type) {
            case DEV_HEAP_ALLOC_OK:
                mem_stat_info_ptr->heap_sector_alloc_cnt++;
                mem_stat_info_ptr->heap_sector_allocated_bytes += size;
                // Track the high-water mark of allocated bytes.
                if (mem_stat_info_ptr->heap_sector_allocated_bytes_max < mem_stat_info_ptr->heap_sector_allocated_bytes) {
                    mem_stat_info_ptr->heap_sector_allocated_bytes_max = mem_stat_info_ptr->heap_sector_allocated_bytes;
                }
                mem_stat_info_ptr->heap_alloc_total_bytes += size;
                break;
            case DEV_HEAP_ALLOC_FAIL:
                mem_stat_info_ptr->heap_alloc_fail_cnt++;
                break;
            case DEV_HEAP_FREE:
                mem_stat_info_ptr->heap_sector_alloc_cnt--;
                mem_stat_info_ptr->heap_sector_allocated_bytes -= size;
                break;
        }
    }
}

// Convert a requested byte count to a word count, rounding up.
// Invalid requests (uninitialized heap, size < 1, or larger than the heap)
// are reported via heap_failure(); the rounded conversion is returned
// regardless — NOTE(review): callers appear to reject via a 0 result
// (e.g. requested_bytes == 0 rounds to 0), confirm against callers.
static ns_mem_word_size_t convert_allocation_size(ns_mem_book_t *book, ns_mem_block_size_t requested_bytes)
{
    if (book->heap_main[0] == 0) {
        heap_failure(book, NS_DYN_MEM_HEAP_SECTOR_UNITIALIZED);
    } else if (requested_bytes < 1) {
        heap_failure(book, NS_DYN_MEM_ALLOCATE_SIZE_NOT_VALID);
    } else if (requested_bytes > (book->heap_size - 2 * sizeof(ns_mem_word_size_t))) {
        heap_failure(book, NS_DYN_MEM_ALLOCATE_SIZE_NOT_VALID);
    }
    return (requested_bytes + sizeof(ns_mem_word_size_t) - 1) / sizeof(ns_mem_word_size_t);
}

// Checks that block length indicators are valid
// Block has format: Size of data area [1 word] | data area [abs(size) words]| Size of data area [1 word]
// If Size is negative it means area is unallocated
// Returns 0 when the leading and trailing size words match (and are nonzero),
// -1 otherwise.
static int8_t ns_mem_block_validate(ns_mem_word_size_t *block_start)
{
    int8_t ret_val = -1;
    ns_mem_word_size_t *end = block_start;
    ns_mem_word_size_t size_start = *end;
    end += (1 + abs(size_start));
    if (size_start != 0 && size_start == *end) {
        ret_val = 0;
    }
    return ret_val;
}
#endif

// For direction, use 1 for direction up and -1 for down
// (1 = temporary alloc: first-fit from the start of the holes list, subject
// to the temporary_alloc_heap_limit; -1 = long-lived alloc: first-fit from
// the end of the list). Runs inside a platform critical section. Returns a
// pointer to the block's data area, or NULL on failure.
static void *ns_mem_internal_alloc(ns_mem_book_t *book, const ns_mem_block_size_t alloc_size, int direction)
{
#ifndef STANDARD_MALLOC
    if (!book) {
        /* We can not do anything except return NULL because we can't find book
           keeping block */
        return NULL;
    }

    // Temporary allocations are refused once allocated bytes exceed the limit.
    if (book->mem_stat_info_ptr && direction == 1) {
        if (book->mem_stat_info_ptr->heap_sector_allocated_bytes > book->temporary_alloc_heap_limit) {
            /* Not enough heap for temporary memory allocation */
            dev_stat_update(book->mem_stat_info_ptr, DEV_HEAP_ALLOC_FAIL, 0);
            return NULL;
        }
    }

    ns_mem_word_size_t *block_ptr = NULL;

    platform_enter_critical();

    ns_mem_word_size_t data_size = convert_allocation_size(book, alloc_size);
    if (!data_size) {
        goto done;
    }

    // ns_list_foreach, either forwards or backwards, result to ptr
    for (hole_t *cur_hole = direction > 0 ? ns_list_get_first(&book->holes_list)
                                          : ns_list_get_last(&book->holes_list);
            cur_hole;
            cur_hole = direction > 0 ? ns_list_get_next(&book->holes_list, cur_hole)
                                     : ns_list_get_previous(&book->holes_list, cur_hole)
        ) {
        ns_mem_word_size_t *p = block_start_from_hole(cur_hole);
        if (ns_mem_block_validate(p) != 0 || *p >= 0) {
            //Validation failed, or this supposed hole has positive (allocated) size
            heap_failure(book, NS_DYN_MEM_HEAP_SECTOR_CORRUPTED);
            break;
        }
        if (-*p >= data_size) {
            // Found a big enough block
            block_ptr = p;
            break;
        }
    }

    if (!block_ptr) {
        goto done;
    }

    // Separate declaration from initialization to keep IAR happy as the gotos skip this block.
    ns_mem_word_size_t block_data_size;
    block_data_size = -*block_ptr;
    // Split only if the remainder can hold its two size words plus a hole_t.
    if (block_data_size >= (data_size + 2 + HOLE_T_SIZE)) {
        ns_mem_word_size_t hole_size = block_data_size - data_size - 2;
        ns_mem_word_size_t *hole_ptr;
        //There is enough room for a new hole so create it first
        if (direction > 0) {
            hole_ptr = block_ptr + 1 + data_size + 1;
            // Hole will be left at end of area.
            // Would like to just replace this block_ptr with new descriptor, but
            // they could overlap, so ns_list_replace might fail
            //ns_list_replace(&holes_list, block_ptr, hole_from_block_start(hole_ptr));
            hole_t *before = ns_list_get_previous(&book->holes_list, hole_from_block_start(block_ptr));
            ns_list_remove(&book->holes_list, hole_from_block_start(block_ptr));
            if (before) {
                ns_list_add_after(&book->holes_list, before, hole_from_block_start(hole_ptr));
            } else {
                ns_list_add_to_start(&book->holes_list, hole_from_block_start(hole_ptr));
            }
        } else {
            hole_ptr = block_ptr;
            // Hole remains at start of area - keep existing descriptor in place.
            block_ptr += 1 + hole_size + 1;
        }

        hole_ptr[0] = -hole_size;
        hole_ptr[1 + hole_size] = -hole_size;
    } else {
        // Not enough room for a left-over hole, so use the whole block
        data_size = block_data_size;
        ns_list_remove(&book->holes_list, hole_from_block_start(block_ptr));
    }
    // Positive size words mark the block as allocated.
    block_ptr[0] = data_size;
    block_ptr[1 + data_size] = data_size;

done:
    if (book->mem_stat_info_ptr) {
        if (block_ptr) {
            //Update Allocate OK
            dev_stat_update(book->mem_stat_info_ptr, DEV_HEAP_ALLOC_OK, (data_size + 2) * sizeof(ns_mem_word_size_t));
        } else {
            //Update Allocate Fail, second parameter is used for stats
            dev_stat_update(book->mem_stat_info_ptr, DEV_HEAP_ALLOC_FAIL, 0);
        }
    }
    platform_exit_critical();

    // Data area starts one word past the block's leading size word.
    return block_ptr ? block_ptr + 1 : NULL;
#else
    void *retval = NULL;
    if (alloc_size) {
        platform_enter_critical();
        retval = malloc(alloc_size);
        platform_exit_critical();
    }
    return retval;
#endif
}

// Long-lived allocation (searches holes list from the end).
void *ns_mem_alloc(ns_mem_book_t *heap, ns_mem_block_size_t alloc_size)
{
    return ns_mem_internal_alloc(heap, alloc_size, -1);
}

// Temporary allocation (searches from the start, limit-checked).
void *ns_mem_temporary_alloc(ns_mem_book_t *heap, ns_mem_block_size_t alloc_size)
{
    return ns_mem_internal_alloc(heap, alloc_size, 1);
}

// Legacy wrappers operating on the default book.
void *ns_dyn_mem_alloc(ns_mem_block_size_t alloc_size)
{
    return ns_mem_alloc(default_book, alloc_size);
}

void *ns_dyn_mem_temporary_alloc(ns_mem_block_size_t alloc_size)
{
    return ns_mem_temporary_alloc(default_book, alloc_size);
}

#ifndef STANDARD_MALLOC
// Mark cur_block free and coalesce it with free neighbours within its region,
// maintaining the address-ordered holes list.
static void ns_mem_free_and_merge_with_adjacent_blocks(ns_mem_book_t *book, ns_mem_word_size_t *cur_block, ns_mem_word_size_t data_size)
{
    // Theory of operation: Block is always in form | Len | Data | Len |
    // So we need to check length of previous (if current not heap start)
    // and next (if current not heap end) blocks. Negative length means
    // free memory so we can merge freed block with those.

    hole_t *existing_start = NULL;
    hole_t *existing_end = NULL;
    ns_mem_word_size_t *start = cur_block;
    ns_mem_word_size_t *end = cur_block + data_size + 1;
    ns_mem_word_size_t *region_start;
    ns_mem_word_size_t *region_end;

    // Coalescing must not cross region boundaries, so locate the region first.
    int region_index = ns_dyn_mem_region_find(book, cur_block, data_size);
    if (region_index >= 0) {
        region_start = book->heap_main[region_index];
        region_end = book->heap_main_end[region_index];
    } else {
        heap_failure(book, NS_DYN_MEM_HEAP_SECTOR_CORRUPTED);
        // can't find region for the block, return
        return;
    }

    //invalidate current block
    *start = -data_size;
    *end = -data_size;
    ns_mem_word_size_t merged_data_size = data_size;

    // Merge with the preceding block if it is free (negative trailing size).
    if (start != region_start) {
        if (*(start - 1) < 0) {
            ns_mem_word_size_t *block_end = start - 1;
            ns_mem_word_size_t block_size = 1 + (-*block_end) + 1;
            merged_data_size += block_size;
            start -= block_size;
            if (*start != *block_end) {
                heap_failure(book, NS_DYN_MEM_HEAP_SECTOR_CORRUPTED);
            }
            // Only blocks large enough to carry a hole_t have a descriptor.
            if (block_size >= 1 + HOLE_T_SIZE + 1) {
                existing_start = hole_from_block_start(start);
            }
        }
    }

    // Merge with the following block if it is free.
    if (end != region_end) {
        if (*(end + 1) < 0) {
            ns_mem_word_size_t *block_start = end + 1;
            ns_mem_word_size_t block_size = 1 + (-*block_start) + 1;
            merged_data_size += block_size;
            end += block_size;
            if (*end != *block_start) {
                heap_failure(book, NS_DYN_MEM_HEAP_SECTOR_CORRUPTED);
            }
            if (block_size >= 1 + HOLE_T_SIZE + 1) {
                existing_end = hole_from_block_start(block_start);
            }
        }
    }

    hole_t *to_add = hole_from_block_start(start);
    hole_t *before = NULL;
    if (existing_end) {
        // Extending hole described by "existing_end" downwards.
        // Will replace with descriptor at bottom of merged block.
        // (Can't use ns_list_replace, because of danger of overlap)
        // Optimisation - note our position for insertion below.
        before = ns_list_get_next(&book->holes_list, existing_end);
        ns_list_remove(&book->holes_list, existing_end);
    }
    if (existing_start) {
        // Extending hole described by "existing_start" upwards.
        // No need to modify that descriptor - it remains at the bottom
        // of the merged block to describe it.
    } else {
        // Didn't find adjacent descriptors, but may still
        // be merging with small blocks without descriptors.
        if (merged_data_size >= HOLE_T_SIZE) {
            // Locate hole position in list, if we don't already know
            // from merging with the block above.
            if (!existing_end) {
                ns_list_foreach(hole_t, ptr, &book->holes_list) {
                    if (ptr > to_add) {
                        before = ptr;
                        break;
                    }
                }
            }
            if (before) {
                ns_list_add_before(&book->holes_list, before, to_add);
            } else {
                ns_list_add_to_end(&book->holes_list, to_add);
            }

        }
    }
    // Stamp the merged block's final (free) size at both ends.
    *start = -merged_data_size;
    *end = -merged_data_size;
}
#endif

// True when ptr (a block start) and its size fall inside a known region.
static bool pointer_address_validate(ns_mem_book_t *book, ns_mem_word_size_t *ptr, ns_mem_word_size_t size)
{

    if (ns_dyn_mem_region_find(book, ptr, size) >= 0) {
        return true;
    }

    return false;
}

// Free a block previously returned by the alloc functions. Detects invalid
// pointers, double frees (negative stored size), and corrupted size words,
// reporting each via heap_failure(). NULL is a harmless no-op.
void ns_mem_free(ns_mem_book_t *book, void *block)
{
#ifndef STANDARD_MALLOC

    if (!block) {
        return;
    }

    ns_mem_word_size_t *ptr = block;
    ns_mem_word_size_t size;

    platform_enter_critical();
    // Step back from the data area to the block's leading size word.
    ptr--;
    //Read Current Size
    size = *ptr;
    if (!pointer_address_validate(book, ptr, size)) {
        heap_failure(book, NS_DYN_MEM_POINTER_NOT_VALID);
    } else if (size < 0) {
        // A negative size means the block is already free.
        heap_failure(book, NS_DYN_MEM_DOUBLE_FREE);
    } else {
        if (ns_mem_block_validate(ptr) != 0) {
            heap_failure(book, NS_DYN_MEM_HEAP_SECTOR_CORRUPTED);
        } else {
            ns_mem_free_and_merge_with_adjacent_blocks(book, ptr, size);
            if (book->mem_stat_info_ptr) {
                //Update Free Counter
                dev_stat_update(book->mem_stat_info_ptr, DEV_HEAP_FREE, (size + 2) * sizeof(ns_mem_word_size_t));
            }
        }
    }

    platform_exit_critical();
#else
    platform_enter_critical();
    free(block);
    platform_exit_critical();
#endif
}

// Legacy wrapper operating on the default book.
void ns_dyn_mem_free(void *block)
{
    ns_mem_free(default_book, block);
}
Generated on Tue Jul 12 2022 13:54:39 by Doxygen 1.7.2