A version of LWIP, provided for backwards compatibility.
Dependents: AA_DemoBoard DemoBoard HelloServerDemo DemoBoard_RangeIndicator ... more
mem.c
/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT 00041 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 00042 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 00043 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 00044 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 00045 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 00046 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 00047 * OF SUCH DAMAGE. 00048 * 00049 * This file is part of the lwIP TCP/IP stack. 00050 * 00051 * Author: Adam Dunkels <adam@sics.se> 00052 * Simon Goldschmidt 00053 * 00054 */ 00055 00056 #include "lwip/opt.h" 00057 00058 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */ 00059 00060 #include "lwip/def.h" 00061 #include "lwip/mem.h" 00062 #include "lwip/sys.h" 00063 #include "lwip/stats.h" 00064 00065 #include <string.h> 00066 00067 #if MEM_USE_POOLS 00068 /* lwIP head implemented with different sized pools */ 00069 00070 /** 00071 * This structure is used to save the pool one element came from. 00072 */ 00073 struct mem_helper 00074 { 00075 memp_t poolnr; 00076 }; 00077 00078 /** 00079 * Allocate memory: determine the smallest pool that is big enough 00080 * to contain an element of 'size' and get an element from that pool. 00081 * 00082 * @param size the size in bytes of the memory needed 00083 * @return a pointer to the allocated memory or NULL if the pool is empty 00084 */ 00085 void * 00086 mem_malloc(mem_size_t size) 00087 { 00088 struct mem_helper *element; 00089 memp_t poolnr; 00090 00091 for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr++) { 00092 /* is this pool big enough to hold an element of the required size 00093 plus a struct mem_helper that saves the pool this element came from? 
*/ 00094 if ((size + sizeof(struct mem_helper)) <= memp_sizes[poolnr]) { 00095 break; 00096 } 00097 } 00098 if (poolnr > MEMP_POOL_LAST) { 00099 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); 00100 return NULL; 00101 } 00102 element = (struct mem_helper*)memp_malloc(poolnr); 00103 if (element == NULL) { 00104 /* No need to DEBUGF or ASSERT: This error is already 00105 taken care of in memp.c */ 00106 /** @todo: we could try a bigger pool if this one is empty! */ 00107 return NULL; 00108 } 00109 00110 /* save the pool number this element came from */ 00111 element->poolnr = poolnr; 00112 /* and return a pointer to the memory directly after the struct mem_helper */ 00113 element++; 00114 00115 return element; 00116 } 00117 00118 /** 00119 * Free memory previously allocated by mem_malloc. Loads the pool number 00120 * and calls memp_free with that pool number to put the element back into 00121 * its pool 00122 * 00123 * @param rmem the memory element to free 00124 */ 00125 void 00126 mem_free(void *rmem) 00127 { 00128 struct mem_helper *hmem = (struct mem_helper*)rmem; 00129 00130 LWIP_ASSERT("rmem != NULL", (rmem != NULL)); 00131 LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); 00132 00133 /* get the original struct mem_helper */ 00134 hmem--; 00135 00136 LWIP_ASSERT("hmem != NULL", (hmem != NULL)); 00137 LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); 00138 LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); 00139 00140 /* and put it in the pool we saved earlier */ 00141 memp_free(hmem->poolnr, hmem); 00142 } 00143 00144 #else /* MEM_USE_POOLS */ 00145 /* lwIP replacement for your libc malloc() */ 00146 00147 /** 00148 * The heap is made up as a list of structs of this type. 00149 * This does not have to be aligned since for getting its size, 00150 * we only use the macro SIZEOF_STRUCT_MEM, which automatically alignes. 
00151 */ 00152 struct mem { 00153 /** index (-> ram[next]) of the next struct */ 00154 mem_size_t next; 00155 /** index (-> ram[next]) of the next struct */ 00156 mem_size_t prev; 00157 /** 1: this area is used; 0: this area is unused */ 00158 u8_t used; 00159 }; 00160 00161 /** All allocated blocks will be MIN_SIZE bytes big, at least! 00162 * MIN_SIZE can be overridden to suit your needs. Smaller values save space, 00163 * larger values could prevent too small blocks to fragment the RAM too much. */ 00164 #ifndef MIN_SIZE 00165 #define MIN_SIZE 12 00166 #endif /* MIN_SIZE */ 00167 /* some alignment macros: we define them here for better source code layout */ 00168 #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) 00169 #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) 00170 #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) 00171 00172 /** the heap. we need one struct mem at the end and some room for alignment */ 00173 static u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT]; 00174 /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ 00175 static u8_t *ram; 00176 /** the last entry, always unused! */ 00177 static struct mem *ram_end; 00178 /** pointer to the lowest free block, this is used for faster search */ 00179 static struct mem *lfree; 00180 /** concurrent access protection */ 00181 static sys_sem_t mem_sem; 00182 00183 /** 00184 * "Plug holes" by combining adjacent empty struct mems. 00185 * After this function is through, there should not exist 00186 * one empty struct mem pointing to another empty struct mem. 00187 * 00188 * @param mem this points to a struct mem which just has been freed 00189 * @internal this function is only called by mem_free() and mem_realloc() 00190 * 00191 * This assumes access to the heap is protected by the calling function 00192 * already. 
00193 */ 00194 static void 00195 plug_holes(struct mem *mem) 00196 { 00197 struct mem *nmem; 00198 struct mem *pmem; 00199 00200 LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); 00201 LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); 00202 LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); 00203 00204 /* plug hole forward */ 00205 LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); 00206 00207 nmem = (struct mem *)&ram[mem->next]; 00208 if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { 00209 /* if mem->next is unused and not end of ram, combine mem and mem->next */ 00210 if (lfree == nmem) { 00211 lfree = mem; 00212 } 00213 mem->next = nmem->next; 00214 ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram; 00215 } 00216 00217 /* plug hole backward */ 00218 pmem = (struct mem *)&ram[mem->prev]; 00219 if (pmem != mem && pmem->used == 0) { 00220 /* if mem->prev is unused, combine mem and mem->prev */ 00221 if (lfree == mem) { 00222 lfree = pmem; 00223 } 00224 pmem->next = mem->next; 00225 ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram; 00226 } 00227 } 00228 00229 /** 00230 * Zero the heap and initialize start, end and lowest-free 00231 */ 00232 void 00233 mem_init(void) 00234 { 00235 struct mem *mem; 00236 00237 LWIP_ASSERT("Sanity check alignment", 00238 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0); 00239 00240 /* align the heap */ 00241 ram = LWIP_MEM_ALIGN(ram_heap); 00242 /* initialize the start of the heap */ 00243 mem = (struct mem *)ram; 00244 mem->next = MEM_SIZE_ALIGNED; 00245 mem->prev = 0; 00246 mem->used = 0; 00247 /* initialize the end of the heap */ 00248 ram_end = (struct mem *)&ram[MEM_SIZE_ALIGNED]; 00249 ram_end->used = 1; 00250 ram_end->next = MEM_SIZE_ALIGNED; 00251 ram_end->prev = MEM_SIZE_ALIGNED; 00252 00253 mem_sem = sys_sem_new(1); 00254 00255 /* initialize the lowest-free pointer to the start of the heap */ 00256 lfree = 
(struct mem *)ram; 00257 00258 #if MEM_STATS 00259 lwip_stats.mem.avail = MEM_SIZE_ALIGNED; 00260 #endif /* MEM_STATS */ 00261 } 00262 00263 /** 00264 * Put a struct mem back on the heap 00265 * 00266 * @param rmem is the data portion of a struct mem as returned by a previous 00267 * call to mem_malloc() 00268 */ 00269 void 00270 mem_free(void *rmem) 00271 { 00272 struct mem *mem; 00273 00274 if (rmem == NULL) { 00275 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n")); 00276 return; 00277 } 00278 LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0); 00279 00280 /* protect the heap from concurrent access */ 00281 sys_arch_sem_wait(mem_sem, 0); 00282 00283 LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram && 00284 (u8_t *)rmem < (u8_t *)ram_end); 00285 00286 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { 00287 LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n")); 00288 #if MEM_STATS 00289 ++lwip_stats.mem.err; 00290 #endif /* MEM_STATS */ 00291 sys_sem_signal(mem_sem); 00292 return; 00293 } 00294 /* Get the corresponding struct mem ... */ 00295 mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); 00296 /* ... which has to be in a used state ... */ 00297 LWIP_ASSERT("mem_free: mem->used", mem->used); 00298 /* ... and is now unused. */ 00299 mem->used = 0; 00300 00301 if (mem < lfree) { 00302 /* the newly freed struct is now the lowest */ 00303 lfree = mem; 00304 } 00305 00306 #if MEM_STATS 00307 lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram); 00308 #endif /* MEM_STATS */ 00309 00310 /* finally, see if prev or next are free also */ 00311 plug_holes(mem); 00312 sys_sem_signal(mem_sem); 00313 } 00314 00315 /** 00316 * In contrast to its name, mem_realloc can only shrink memory, not expand it. 00317 * Since the only use (for now) is in pbuf_realloc (which also can only shrink), 00318 * this shouldn't be a problem! 
00319 * 00320 * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked 00321 * @param newsize required size after shrinking (needs to be smaller than or 00322 * equal to the previous size) 00323 * @return for compatibility reasons: is always == rmem, at the moment 00324 */ 00325 void * 00326 mem_realloc(void *rmem, mem_size_t newsize) 00327 { 00328 mem_size_t size; 00329 mem_size_t ptr, ptr2; 00330 struct mem *mem, *mem2; 00331 00332 /* Expand the size of the allocated memory region so that we can 00333 adjust for alignment. */ 00334 newsize = LWIP_MEM_ALIGN_SIZE(newsize); 00335 00336 if(newsize < MIN_SIZE_ALIGNED) { 00337 /* every data block must be at least MIN_SIZE_ALIGNED long */ 00338 newsize = MIN_SIZE_ALIGNED; 00339 } 00340 00341 if (newsize > MEM_SIZE_ALIGNED) { 00342 return NULL; 00343 } 00344 00345 LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram && 00346 (u8_t *)rmem < (u8_t *)ram_end); 00347 00348 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { 00349 LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n")); 00350 return rmem; 00351 } 00352 /* Get the corresponding struct mem ... */ 00353 mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); 00354 /* ... 
and its offset pointer */ 00355 ptr = (u8_t *)mem - ram; 00356 00357 size = mem->next - ptr - SIZEOF_STRUCT_MEM; 00358 LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size); 00359 if (newsize > size) { 00360 /* not supported */ 00361 return NULL; 00362 } 00363 if (newsize == size) { 00364 /* No change in size, simply return */ 00365 return rmem; 00366 } 00367 00368 /* protect the heap from concurrent access */ 00369 sys_arch_sem_wait(mem_sem, 0); 00370 00371 #if MEM_STATS 00372 lwip_stats.mem.used -= (size - newsize); 00373 #endif /* MEM_STATS */ 00374 00375 mem2 = (struct mem *)&ram[mem->next]; 00376 if(mem2->used == 0) { 00377 /* The next struct is unused, we can simply move it at little */ 00378 mem_size_t next; 00379 /* remember the old next pointer */ 00380 next = mem2->next; 00381 /* create new struct mem which is moved directly after the shrinked mem */ 00382 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; 00383 if (lfree == mem2) { 00384 lfree = (struct mem *)&ram[ptr2]; 00385 } 00386 mem2 = (struct mem *)&ram[ptr2]; 00387 mem2->used = 0; 00388 /* restore the next pointer */ 00389 mem2->next = next; 00390 /* link it back to mem */ 00391 mem2->prev = ptr; 00392 /* link mem to it */ 00393 mem->next = ptr2; 00394 /* last thing to restore linked list: as we have moved mem2, 00395 * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not 00396 * the end of the heap */ 00397 if (mem2->next != MEM_SIZE_ALIGNED) { 00398 ((struct mem *)&ram[mem2->next])->prev = ptr2; 00399 } 00400 /* no need to plug holes, we've already done that */ 00401 } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { 00402 /* Next struct is used but there's room for another struct mem with 00403 * at least MIN_SIZE_ALIGNED of data. 00404 * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem 00405 * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). 00406 * @todo we could leave out MIN_SIZE_ALIGNED. 
We would create an empty 00407 * region that couldn't hold data, but when mem->next gets freed, 00408 * the 2 regions would be combined, resulting in more free memory */ 00409 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; 00410 mem2 = (struct mem *)&ram[ptr2]; 00411 if (mem2 < lfree) { 00412 lfree = mem2; 00413 } 00414 mem2->used = 0; 00415 mem2->next = mem->next; 00416 mem2->prev = ptr; 00417 mem->next = ptr2; 00418 if (mem2->next != MEM_SIZE_ALIGNED) { 00419 ((struct mem *)&ram[mem2->next])->prev = ptr2; 00420 } 00421 /* the original mem->next is used, so no need to plug holes! */ 00422 } 00423 /* else { 00424 next struct mem is used but size between mem and mem2 is not big enough 00425 to create another struct mem 00426 -> don't do anyhting. 00427 -> the remaining space stays unused since it is too small 00428 } */ 00429 sys_sem_signal(mem_sem); 00430 return rmem; 00431 } 00432 00433 /** 00434 * Adam's mem_malloc() plus solution for bug #17922 00435 * Allocate a block of memory with a minimum of 'size' bytes. 00436 * 00437 * @param size is the minimum size of the requested block in bytes. 00438 * @return pointer to allocated memory or NULL if no free memory was found. 00439 * 00440 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). 00441 */ 00442 void * 00443 mem_malloc(mem_size_t size) 00444 { 00445 mem_size_t ptr, ptr2; 00446 struct mem *mem, *mem2; 00447 00448 if (size == 0) { 00449 return NULL; 00450 } 00451 00452 /* Expand the size of the allocated memory region so that we can 00453 adjust for alignment. 
*/ 00454 size = LWIP_MEM_ALIGN_SIZE(size); 00455 00456 if(size < MIN_SIZE_ALIGNED) { 00457 /* every data block must be at least MIN_SIZE_ALIGNED long */ 00458 size = MIN_SIZE_ALIGNED; 00459 } 00460 00461 if (size > MEM_SIZE_ALIGNED) { 00462 return NULL; 00463 } 00464 00465 /* protect the heap from concurrent access */ 00466 sys_arch_sem_wait(mem_sem, 0); 00467 00468 /* Scan through the heap searching for a free block that is big enough, 00469 * beginning with the lowest free block. 00470 */ 00471 for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size; 00472 ptr = ((struct mem *)&ram[ptr])->next) { 00473 mem = (struct mem *)&ram[ptr]; 00474 00475 if ((!mem->used) && 00476 (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { 00477 /* mem is not used and at least perfect fit is possible: 00478 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ 00479 00480 if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { 00481 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing 00482 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') 00483 * -> split large block, create empty remainder, 00484 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if 00485 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, 00486 * struct mem would fit in but no data between mem2 and mem2->next 00487 * @todo we could leave out MIN_SIZE_ALIGNED. 
We would create an empty 00488 * region that couldn't hold data, but when mem->next gets freed, 00489 * the 2 regions would be combined, resulting in more free memory 00490 */ 00491 ptr2 = ptr + SIZEOF_STRUCT_MEM + size; 00492 /* create mem2 struct */ 00493 mem2 = (struct mem *)&ram[ptr2]; 00494 mem2->used = 0; 00495 mem2->next = mem->next; 00496 mem2->prev = ptr; 00497 /* and insert it between mem and mem->next */ 00498 mem->next = ptr2; 00499 mem->used = 1; 00500 00501 if (mem2->next != MEM_SIZE_ALIGNED) { 00502 ((struct mem *)&ram[mem2->next])->prev = ptr2; 00503 } 00504 #if MEM_STATS 00505 lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM); 00506 if (lwip_stats.mem.max < lwip_stats.mem.used) { 00507 lwip_stats.mem.max = lwip_stats.mem.used; 00508 } 00509 #endif /* MEM_STATS */ 00510 } else { 00511 /* (a mem2 struct does no fit into the user data space of mem and mem->next will always 00512 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have 00513 * take care of this). 00514 * -> near fit or excact fit: do not split, no mem2 creation 00515 * also can't move mem->next directly behind mem, since mem->next 00516 * will always be used at this point! 
00517 */ 00518 mem->used = 1; 00519 #if MEM_STATS 00520 lwip_stats.mem.used += mem->next - ((u8_t *)mem - ram); 00521 if (lwip_stats.mem.max < lwip_stats.mem.used) { 00522 lwip_stats.mem.max = lwip_stats.mem.used; 00523 } 00524 #endif /* MEM_STATS */ 00525 } 00526 00527 if (mem == lfree) { 00528 /* Find next free block after mem and update lowest free pointer */ 00529 while (lfree->used && lfree != ram_end) { 00530 lfree = (struct mem *)&ram[lfree->next]; 00531 } 00532 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); 00533 } 00534 sys_sem_signal(mem_sem); 00535 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", 00536 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); 00537 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", 00538 (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); 00539 LWIP_ASSERT("mem_malloc: sanity check alignment", 00540 (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0); 00541 00542 return (u8_t *)mem + SIZEOF_STRUCT_MEM; 00543 } 00544 } 00545 LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); 00546 #if MEM_STATS 00547 ++lwip_stats.mem.err; 00548 #endif /* MEM_STATS */ 00549 sys_sem_signal(mem_sem); 00550 return NULL; 00551 } 00552 00553 #endif /* MEM_USE_POOLS */ 00554 /** 00555 * Contiguously allocates enough space for count objects that are size bytes 00556 * of memory each and returns a pointer to the allocated memory. 00557 * 00558 * The allocated memory is filled with bytes of value zero. 
00559 * 00560 * @param count number of objects to allocate 00561 * @param size size of the objects to allocate 00562 * @return pointer to allocated memory / NULL pointer if there is an error 00563 */ 00564 void *mem_calloc(mem_size_t count, mem_size_t size) 00565 { 00566 void *p; 00567 00568 /* allocate 'count' objects of size 'size' */ 00569 p = mem_malloc(count * size); 00570 if (p) { 00571 /* zero the memory */ 00572 memset(p, 0, count * size); 00573 } 00574 return p; 00575 } 00576 #endif /* !MEM_LIBC_MALLOC */
Generated on Tue Jul 12 2022 16:06:15 by 1.7.2