mem.c
/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>
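
/* A minimal sketch of how the three allocator strategies described in the
 * file header might be selected in lwipopts.h. The option and macro names
 * are the ones quoted in the header comment above; the pool counts/sizes
 * are example values only, not recommendations.
 *
 *   #define MEM_LIBC_MALLOC 1    // use the C library malloc() instead
 *
 *   // or: use fixed-size pools instead of this heap
 *   #define MEM_USE_POOLS        1
 *   #define MEM_USE_CUSTOM_POOLS 1
 *   // ...plus an lwippools.h such as:
 *   // LWIP_MALLOC_MEMPOOL_START
 *   // LWIP_MALLOC_MEMPOOL(20, 256)
 *   // LWIP_MALLOC_MEMPOOL(10, 512)
 *   // LWIP_MALLOC_MEMPOOL(5, 1512)
 *   // LWIP_MALLOC_MEMPOOL_END
 *
 *   // or: leave both at 0 to build the heap allocator below (the default)
 */
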
#if MEM_USE_POOLS
/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_sizes[poolnr]) {
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    return NULL;
  }
  element = (struct memp_malloc_helper*)memp_malloc(poolnr);
  if (element == NULL) {
    /* No need to DEBUGF or ASSERT: This error is already
       taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
    /** Try a bigger pool if this one is empty! */
    if (poolnr < MEMP_POOL_LAST) {
      poolnr++;
      goto again;
    }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  return ret;
}

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}
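
/* Sketch of the layout produced by the pool-backed mem_malloc()/mem_free()
 * above (exact byte counts depend on MEM_ALIGNMENT):
 *
 *   element                             ret (returned to the caller)
 *   v                                   v
 *   [ memp_malloc_helper | alignment ][ user data: 'size' bytes ... ]
 *     '-> poolnr: pool the element came from
 *
 * mem_free() steps the user pointer back by the aligned helper size to
 * recover 'element', reads 'poolnr' and hands the element to memp_free().
 */
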
#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};

/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent too small blocks from fragmenting the RAM too much.
 */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif
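
/* Sketch of the heap structure built from the declarations above: 'next'
 * and 'prev' are byte offsets into ram[], so the blocks form a doubly
 * linked list that is also physically contiguous:
 *
 *   ram                                               ram_end (used == 1)
 *   v                                                 v
 *   [struct mem|data...][struct mem|data...][...].....[struct mem]
 *
 * The next/prev offsets chain the blocks together; lfree points at the
 * lowest free block so searches can skip the used region below it.
 */
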
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */


/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
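
/* Worked example of plug_holes() (illustrative): suppose block B has just
 * been freed while its neighbours A (free) and C (free) surround it:
 *
 *   before:         [A free][B freed][C free][D used]
 *   plug forward:   [A free][B free..........][D used]   (B absorbs C)
 *   plug backward:  [A free...................][D used]  (A absorbs B)
 *
 * This keeps the invariant that no two adjacent blocks are both unused,
 * which mem_malloc() below relies on when deciding not to split a block.
 */
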
/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if(sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
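
/* Usage sketch (illustrative; the size 128 is an arbitrary example):
 * mem_init() must have run before the first allocation. In lwIP it is
 * normally reached through lwip_init() rather than called directly.
 *
 *   lwip_init();                  // runs mem_init() among other init steps
 *   void *p = mem_malloc(128);
 *   if (p != NULL) {
 *     // ... use the buffer ...
 *     mem_free(p);
 *   }
 */
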
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
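
/* Usage sketch for mem_trim() (illustrative; 'buf', 'produce_data' and the
 * sizes are hypothetical): allocate generously, then return the unused tail
 * to the heap once the real length is known.
 *
 *   u8_t *buf = (u8_t *)mem_malloc(1024);
 *   if (buf != NULL) {
 *     mem_size_t used = produce_data(buf, 1024);  // hypothetical helper
 *     buf = (u8_t *)mem_trim(buf, used);          // currently always == buf
 *   }
 */
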
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
           * taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = (struct mem *)(void *)&ram[cur->next];
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while(local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}
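
/* Sketch of the block split performed by mem_malloc() above when the free
 * block found is large enough to be divided (illustrative):
 *
 *   before: [mem free | user space .............................. ]
 *   after:  [mem used | 'size' bytes ][mem2 free | remainder .... ]
 *
 * The split only happens when the remainder can hold a struct mem plus
 * MIN_SIZE_ALIGNED bytes of data; otherwise the whole block is handed out
 * (the "near fit or exact fit" branch above).
 */
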
#endif /* MEM_USE_POOLS */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}
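
/* Note (a defensive sketch, not part of lwIP): 'count * size' above can
 * wrap around, since mem_size_t is an unsigned type. Callers handling
 * untrusted sizes could guard against that, e.g.:
 *
 *   void *p = NULL;
 *   if (count == 0 || size <= (mem_size_t)-1 / count) {
 *     p = mem_calloc(count, size);   // product cannot overflow here
 *   }
 */
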

#endif /* !MEM_LIBC_MALLOC */