Kenji Arai / mbed-os_TYBLE16

Dependents:   TYBLE16_simple_data_logger TYBLE16_MP3_Air

lwip_mem.c

/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
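
/* Example (illustrative only, not part of this file): a minimal lwipopts.h
 * fragment that selects the pool-based allocator described above. The option
 * names are real lwIP options; the values shown are example choices.
 *
 *   #define MEM_LIBC_MALLOC       0   keep lwIP's own allocator
 *   #define MEM_USE_POOLS         1   back mem_malloc() with fixed-size pools
 *   #define MEMP_USE_CUSTOM_POOLS 1   pool layout comes from "lwippools.h"
 */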

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_LIBC_MALLOC
#include <stdlib.h> /* for malloc()/free() */
#endif

/* This is overridable for tests only... */
#ifndef LWIP_MEM_ILLEGAL_FREE
#define LWIP_MEM_ILLEGAL_FREE(msg)         LWIP_ASSERT(msg, 0)
#endif

#define MEM_STATS_INC_LOCKED(x)         SYS_ARCH_LOCKED(MEM_STATS_INC(x))
#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))

#if MEM_OVERFLOW_CHECK
#define MEM_SANITY_OFFSET   MEM_SANITY_REGION_BEFORE_ALIGNED
#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
#else
#define MEM_SANITY_OFFSET   0
#define MEM_SANITY_OVERHEAD 0
#endif

#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
/**
 * Check if a mem element was the victim of an overflow or underflow
 * (e.g. the restricted area after/before it has been altered)
 *
 * @param p the mem element to check
 * @param size allocated size of the element
 * @param descr1 description of the element source shown on error
 * @param descr2 description of the element source shown on error
 */
void
mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
{
#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
  u16_t k;
  u8_t *m;

#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */

#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      char errstr[128];
      snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
      LWIP_ASSERT(errstr, 0);
    }
  }
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
#else
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
  LWIP_UNUSED_ARG(descr1);
  LWIP_UNUSED_ARG(descr2);
#endif
}

/**
 * Initialize the restricted area of a mem element.
 */
void
mem_overflow_init_raw(void *p, size_t size)
{
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
  u8_t *m;
#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
  memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
#endif
#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t *)p + size;
  memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
#endif
#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(size);
#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
}
#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
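
/* Sketch (explanatory, not part of lwIP): with MEM_OVERFLOW_CHECK enabled,
 * each user block is bracketed by guard regions filled with 0xcd, roughly:
 *
 *   | BEFORE guard (0xcd..) | user data (size bytes) | AFTER guard (0xcd..) |
 *   ^                       ^                        ^
 *   p - MEM_SANITY_REGION_BEFORE_ALIGNED             p + size
 *
 * mem_overflow_init_raw() paints the guards; mem_overflow_check_raw() asserts
 * that every guard byte is still 0xcd when the block is later checked or freed.
 */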

#if MEM_LIBC_MALLOC || MEM_USE_POOLS

/** mem_init is not used when using pools instead of a heap or using
 * C library malloc().
 */
void
mem_init(void)
{
}

/** mem_trim is not used when using pools instead of a heap or using
 * C library malloc(): we can't free part of a pool element, and the stack
 * does not support mem_trim() returning a different pointer.
 */
void *
mem_trim(void *mem, mem_size_t size)
{
  LWIP_UNUSED_ARG(size);
  return mem;
}
#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */

#if MEM_LIBC_MALLOC
/* lwIP heap implemented using C library malloc() */

/* in case C library malloc() needs extra protection,
 * allow these defines to be overridden.
 */
#ifndef mem_clib_free
#define mem_clib_free free
#endif
#ifndef mem_clib_malloc
#define mem_clib_malloc malloc
#endif
#ifndef mem_clib_calloc
#define mem_clib_calloc calloc
#endif

#if LWIP_STATS && MEM_STATS
#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
#else
#define MEM_LIBC_STATSHELPER_SIZE 0
#endif

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
  if (ret == NULL) {
    MEM_STATS_INC_LOCKED(err);
  } else {
    LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
#if LWIP_STATS && MEM_STATS
    *(mem_size_t *)ret = size;
    ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE;
    MEM_STATS_INC_USED_LOCKED(used, size);
#endif
  }
  return ret;
}

/** Put memory back on the heap
 *
 * @param rmem is the pointer as returned by a previous call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
#if LWIP_STATS && MEM_STATS
  rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE;
  MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem);
#endif
  mem_clib_free(rmem);
}
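
/* Sketch (explanatory, not part of lwIP): when MEM_STATS is enabled, the
 * libc-backed allocator prepends the requested size to each block so that
 * mem_free() can account for it:
 *
 *   malloc() result:  | mem_size_t size | user data ...        |
 *                                       ^ pointer returned by mem_malloc()
 *
 * mem_free() steps the pointer back by MEM_LIBC_STATSHELPER_SIZE to read the
 * stored size before handing the block to mem_clib_free().
 */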

#elif MEM_USE_POOLS

/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element = NULL;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_pools[poolnr]->size) {
      element = (struct memp_malloc_helper *)memp_malloc(poolnr);
      if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /** Try a bigger pool if this one is empty! */
        if (poolnr < MEMP_POOL_LAST) {
          continue;
        }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        MEM_STATS_INC_LOCKED(err);
        return NULL;
      }
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    MEM_STATS_INC_LOCKED(err);
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
  /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
  element->size = (u16_t)size;
  MEM_STATS_INC_USED_LOCKED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
  /* initialize unused memory (diff between requested size and selected pool's size) */
  memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
  return ret;
}

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool.
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  /* cast through void* to get rid of alignment warnings */
  hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  MEM_STATS_DEC_USED_LOCKED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
  {
    u16_t i;
    LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
                hmem->size <= memp_pools[hmem->poolnr]->size);
    /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
    for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
      u8_t data = *((u8_t *)rmem + i);
      LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
    }
  }
#endif /* MEMP_OVERFLOW_CHECK */

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
#if MEM_OVERFLOW_CHECK
  /** this keeps track of the user allocation size for guard checks */
  mem_size_t user_size;
#endif
};
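
/* Sketch (explanatory, not part of lwIP): the heap is a single byte array
 * 'ram' carved into blocks, each headed by a struct mem whose next/prev
 * fields are byte offsets into 'ram' rather than pointers, roughly:
 *
 *   ram: | struct mem | data | struct mem | data | ... | struct mem (ram_end) |
 *          next ------------^ next -------------^
 *
 * Offsets stay valid wherever the heap is placed, and a mem_size_t offset can
 * be smaller than a full pointer on small targets.
 */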

/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could keep very small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)
#define LWIP_MEM_LFREE_VOLATILE       volatile

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()
#define LWIP_MEM_LFREE_VOLATILE

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/** pointer to the lowest free block, this is used for faster search */
static struct mem * LWIP_MEM_LFREE_VOLATILE lfree;

#if MEM_SANITY_CHECK
static void mem_sanity(void);
#define MEM_SANITY() mem_sanity()
#else
#define MEM_SANITY()
#endif

#if MEM_OVERFLOW_CHECK
static void
mem_overflow_init_element(struct mem *mem, mem_size_t user_size)
{
  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  mem->user_size = user_size;
  mem_overflow_init_raw(p, user_size);
}

static void
mem_overflow_check_element(struct mem *mem)
{
  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
  mem_overflow_check_raw(p, mem->user_size, "heap", "");
}
#else /* MEM_OVERFLOW_CHECK */
#define mem_overflow_init_element(mem, size)
#define mem_overflow_check_element(mem)
#endif /* MEM_OVERFLOW_CHECK */

static struct mem *
ptr_to_mem(mem_size_t ptr)
{
  return (struct mem *)(void *)&ram[ptr];
}

static mem_size_t
mem_to_ptr(void *mem)
{
  return (mem_size_t)((u8_t *)mem - ram);
}

/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = ptr_to_mem(mem->next);
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    if (nmem->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem);
    }
  }

  /* plug hole backward */
  pmem = ptr_to_mem(mem->prev);
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    if (mem->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem);
    }
  }
}
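
/* Sketch (explanatory, not part of lwIP): coalescing in plug_holes(). Freeing
 * the middle block below merges it with its free neighbours by rewriting the
 * next/prev offsets, so no two free blocks are ever left adjacent:
 *
 *   before: | used | FREE | freed | FREE | used |
 *   after:  | used | FREE (one combined block)  | used |
 *
 * lfree is pulled back to the combined block if it pointed into the merge.
 */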

/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
              (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = ptr_to_mem(MEM_SIZE_ALIGNED);
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;
  MEM_SANITY();

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

/* Check if a struct mem is correctly linked.
 * If not, double-free is a possible reason.
 */
static int
mem_link_valid(struct mem *mem)
{
  struct mem *nmem, *pmem;
  mem_size_t rmem_idx;
  rmem_idx = mem_to_ptr(mem);
  nmem = ptr_to_mem(mem->next);
  pmem = ptr_to_mem(mem->prev);
  if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
      ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
      ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
    return 0;
  }
  return 1;
}

#if MEM_SANITY_CHECK
static void
mem_sanity(void)
{
  struct mem *mem;
  u8_t last_used;

  /* begin with first element here */
  mem = (struct mem *)ram;
  LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1));
  last_used = mem->used;
  LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0);
  LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
  LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next)) == ptr_to_mem(mem->next));

  /* check all elements before the end of the heap */
  for (mem = ptr_to_mem(mem->next);
       ((u8_t *)mem > ram) && (mem < ram_end);
       mem = ptr_to_mem(mem->next)) {
    LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem);
    LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED);
    LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
    LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev)) == ptr_to_mem(mem->prev));
    LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next)) == ptr_to_mem(mem->next));

    if (last_used == 0) {
      /* 2 unused elements in a row? */
      LWIP_ASSERT("heap element unused?", mem->used == 1);
    } else {
      LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1));
    }

    LWIP_ASSERT("heap element link valid", mem_link_valid(mem));

    /* used/unused alternating */
    last_used = mem->used;
  }
  LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED));
  LWIP_ASSERT("heap element used valid", mem->used == 1);
  LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED);
  LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED);
}
#endif /* MEM_SANITY_CHECK */

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* Get the corresponding struct mem: */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));

  if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* mem has to be in a used state */
  if (!mem->used) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  if (!mem_link_valid(mem)) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* mem is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param new_size required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if new_size is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t new_size)
{
  mem_size_t size, newsize;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
              (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
#if MEM_OVERFLOW_CHECK
  mem_overflow_check_element(mem);
#endif
  /* ... and its offset pointer */
  ptr = mem_to_ptr(mem);

  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = ptr_to_mem(mem->next);
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    if (lfree == mem2) {
      lfree = ptr_to_mem(ptr2);
    }
    mem2 = ptr_to_mem(ptr2);
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
    mem2 = ptr_to_mem(ptr2);
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ptr_to_mem(mem2->next)->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if MEM_OVERFLOW_CHECK
  mem_overflow_init_element(mem, new_size);
#endif
  MEM_SANITY();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
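
/* Sketch (illustrative only, not from this file): a typical mem_trim() use,
 * over-allocating and then shrinking once the real length is known. The sizes
 * here are arbitrary example values.
 *
 *   void *buf = mem_malloc(1024);        worst-case allocation
 *   if (buf != NULL) {
 *     mem_size_t used_len = 200;         actual length, known later
 *     buf = mem_trim(buf, used_len);     returns the same pointer
 *   }
 *
 * lwIP itself uses this pattern in pbuf handling, e.g. when a received or
 * built packet turns out shorter than the buffer first reserved for it.
 */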

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size_in is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size_in)
{
  mem_size_t ptr, ptr2, size;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size_in == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }
#if MEM_OVERFLOW_CHECK
  size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
#endif
  if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ptr_to_mem(ptr)->next) {
      mem = ptr_to_mem(ptr);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
          LWIP_ASSERT("invalid next ptr", ptr2 != MEM_SIZE_ALIGNED);
          /* create mem2 struct */
          mem2 = ptr_to_mem(ptr2);
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ptr_to_mem(mem2->next)->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
           * be used at this point: if not, we have 2 unused structs in a row and plug_holes should
           * have taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = ptr_to_mem(cur->next);
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
                    (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);

#if MEM_OVERFLOW_CHECK
        mem_overflow_init_element(mem, size_in);
#endif
        MEM_SANITY();
        return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  return NULL;
}

#endif /* MEM_USE_POOLS */

#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  return mem_clib_calloc(count, size);
}

#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;
  size_t alloc_size = (size_t)count * (size_t)size;

  if ((size_t)(mem_size_t)alloc_size != alloc_size) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size));
    return NULL;
  }

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc((mem_size_t)alloc_size);
  if (p) {
    /* zero the memory */
    memset(p, 0, alloc_size);
  }
  return p;
}
#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
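
/* Sketch (explanatory, not part of lwIP): the cast-back check above catches
 * count * size products that do not fit in mem_size_t. For example, with a
 * 16-bit mem_size_t, count = 300 and size = 300 give alloc_size = 90000;
 * (mem_size_t)90000 truncates to 24464, the comparison fails, and mem_calloc()
 * returns NULL instead of allocating a too-small block.
 */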