STM32F7 Ethernet interface for nucleo STM32F767

lwip_mem.c

00001 /**
00002  * @file
00003  * Dynamic memory manager
00004  *
00005  * This is a lightweight replacement for the standard C library malloc().
00006  *
00007  * If you want to use the standard C library malloc() instead, define
00008  * MEM_LIBC_MALLOC to 1 in your lwipopts.h
00009  *
00010  * To let mem_malloc() use pools (prevents fragmentation and is much faster than
00011  * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
00012  * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
00013  * of pools like this (more pools can be added between _START and _END):
00014  *
00015  * Define three pools with sizes 256, 512, and 1512 bytes
00016  * LWIP_MALLOC_MEMPOOL_START
00017  * LWIP_MALLOC_MEMPOOL(20, 256)
00018  * LWIP_MALLOC_MEMPOOL(10, 512)
00019  * LWIP_MALLOC_MEMPOOL(5, 1512)
00020  * LWIP_MALLOC_MEMPOOL_END
00021  */
00022 
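/* Configuration sketch (illustrative, not part of this file): the allocator
 * variants described above are selected from lwipopts.h. The option names are
 * the standard lwIP ones referenced in this header; the values are examples.
 *
 *   // lwipopts.h -- pick one strategy
 *   #define MEM_LIBC_MALLOC        0    // 1: wrap the C library malloc()/free()
 *   #define MEM_USE_POOLS          0    // 1: serve mem_malloc() from memp pools
 *   #define MEMP_USE_CUSTOM_POOLS  0    // must be 1 whenever MEM_USE_POOLS is 1
 *   #define MEM_SIZE               (16 * 1024)  // heap size used by the built-in heap below
 *
 * When MEM_USE_POOLS is enabled, an application-provided "lwippools.h" with a
 * pool list such as the LWIP_MALLOC_MEMPOOL example above is also required.
 */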
00023 /*
00024  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
00025  * All rights reserved.
00026  *
00027  * Redistribution and use in source and binary forms, with or without modification,
00028  * are permitted provided that the following conditions are met:
00029  *
00030  * 1. Redistributions of source code must retain the above copyright notice,
00031  *    this list of conditions and the following disclaimer.
00032  * 2. Redistributions in binary form must reproduce the above copyright notice,
00033  *    this list of conditions and the following disclaimer in the documentation
00034  *    and/or other materials provided with the distribution.
00035  * 3. The name of the author may not be used to endorse or promote products
00036  *    derived from this software without specific prior written permission.
00037  *
00038  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
00039  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
00040  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
00041  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
00042  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
00043  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
00044  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
00045  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
00046  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
00047  * OF SUCH DAMAGE.
00048  *
00049  * This file is part of the lwIP TCP/IP stack.
00050  *
00051  * Author: Adam Dunkels <adam@sics.se>
00052  *         Simon Goldschmidt
00053  *
00054  */
00055 
00056 #include "lwip/opt.h"
00057 #include "lwip/mem.h"
00058 #include "lwip/def.h"
00059 #include "lwip/sys.h"
00060 #include "lwip/stats.h"
00061 #include "lwip/err.h"
00062 
00063 #include <string.h>
00064 
00065 #if MEM_LIBC_MALLOC
00066 #include <stdlib.h> /* for malloc()/free() */
00067 #endif
00068 
00069 #if MEM_LIBC_MALLOC || MEM_USE_POOLS
00070 
00071 /** mem_init is not used when using pools instead of a heap or using
00072  * C library malloc().
00073  */
00074 void
00075 mem_init(void)
00076 {
00077 }
00078 
00079 /** mem_trim is not used when using pools instead of a heap or using
00080  * C library malloc(): we can't free part of a pool element and the stack
00081  * supports mem_trim() returning a different pointer
00082  */
00083 void*
00084 mem_trim(void *mem, mem_size_t size)
00085 {
00086   LWIP_UNUSED_ARG(size);
00087   return mem;
00088 }
00089 #endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */
00090 
00091 #if MEM_LIBC_MALLOC
00092 /* lwIP heap implemented using C library malloc() */
00093 
00094 /* in case C library malloc() needs extra protection,
00095  * allow these defines to be overridden.
00096  */
00097 #ifndef mem_clib_free
00098 #define mem_clib_free free
00099 #endif
00100 #ifndef mem_clib_malloc
00101 #define mem_clib_malloc malloc
00102 #endif
00103 #ifndef mem_clib_calloc
00104 #define mem_clib_calloc calloc
00105 #endif
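/* Illustrative only: because of the #ifndef guards above, the mem_clib_* hooks
 * can be redirected from lwipopts.h, e.g. to add checking or accounting around
 * the C library allocator. The wrapper names below are hypothetical.
 *
 *   // lwipopts.h
 *   #define mem_clib_malloc  my_checked_malloc
 *   #define mem_clib_calloc  my_checked_calloc
 *   #define mem_clib_free    my_checked_free
 *
 * Any replacement must keep malloc()/calloc()/free()-compatible signatures and
 * return MEM_ALIGNMENT-aligned blocks (see the assert in mem_malloc() below).
 */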
00106 
00107 #if LWIP_STATS && MEM_STATS
00108 #define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
00109 #else
00110 #define MEM_LIBC_STATSHELPER_SIZE 0
00111 #endif
00112 
00113 /**
00114  * Allocate a block of memory with a minimum of 'size' bytes.
00115  *
00116  * @param size is the minimum size of the requested block in bytes.
00117  * @return pointer to allocated memory or NULL if no free memory was found.
00118  *
00119  * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
00120  */
00121 void *
00122 mem_malloc(mem_size_t size)
00123 {
00124   void* ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
00125   if (ret == NULL) {
00126     MEM_STATS_INC(err);
00127   } else {
00128     LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
00129 #if LWIP_STATS && MEM_STATS
00130     *(mem_size_t*)ret = size;
00131     ret = (u8_t*)ret + MEM_LIBC_STATSHELPER_SIZE;
00132     MEM_STATS_INC_USED(used, size);
00133 #endif
00134   }
00135   return ret;
00136 }
00137 
00138 /** Put memory back on the heap
00139  *
00140  * @param rmem is the pointer as returned by a previous call to mem_malloc()
00141  */
00142 void
00143 mem_free(void *rmem)
00144 {
00145   LWIP_ASSERT("rmem != NULL", (rmem != NULL));
00146   LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
00147 #if LWIP_STATS && MEM_STATS
00148   rmem = (u8_t*)rmem - MEM_LIBC_STATSHELPER_SIZE;
00149   MEM_STATS_DEC_USED(used, *(mem_size_t*)rmem);
00150 #endif
00151   mem_clib_free(rmem);
00152 }
00153 
00154 #elif MEM_USE_POOLS
00155 
00156 /* lwIP heap implemented with different sized pools */
00157 
00158 /**
00159  * Allocate memory: determine the smallest pool that is big enough
00160  * to contain an element of 'size' and get an element from that pool.
00161  *
00162  * @param size the size in bytes of the memory needed
00163  * @return a pointer to the allocated memory or NULL if the pool is empty
00164  */
00165 void *
00166 mem_malloc(mem_size_t size)
00167 {
00168   void *ret;
00169   struct memp_malloc_helper *element = NULL;
00170   memp_t poolnr;
00171   mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
00172 
00173   for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
00174     /* is this pool big enough to hold an element of the required size
00175        plus a struct memp_malloc_helper that saves the pool this element came from? */
00176     if (required_size <= memp_pools[poolnr]->size) {
00177       element = (struct memp_malloc_helper*)memp_malloc(poolnr);
00178       if (element == NULL) {
00179         /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
00180 #if MEM_USE_POOLS_TRY_BIGGER_POOL
00181         /** Try a bigger pool if this one is empty! */
00182         if (poolnr < MEMP_POOL_LAST) {
00183           continue;
00184         }
00185 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
00186         MEM_STATS_INC(err);
00187         return NULL;
00188       }
00189       break;
00190     }
00191   }
00192   if (poolnr > MEMP_POOL_LAST) {
00193     LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
00194     MEM_STATS_INC(err);
00195     return NULL;
00196   }
00197 
00198   /* save the pool number this element came from */
00199   element->poolnr = poolnr;
00200   /* and return a pointer to the memory directly after the struct memp_malloc_helper */
00201   ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
00202 
00203 #if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
00204   /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
00205   element->size = (u16_t)size;
00206   MEM_STATS_INC_USED(used, element->size);
00207 #endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
00208 #if MEMP_OVERFLOW_CHECK
00209   /* initialize unused memory (diff between requested size and selected pool's size) */
00210   memset((u8_t*)ret + size, 0xcd, memp_pools[poolnr]->size - size);
00211 #endif /* MEMP_OVERFLOW_CHECK */
00212   return ret;
00213 }
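/* Layout sketch of a pool-backed allocation as built above (illustrative):
 *
 *   element                        ret = element + LWIP_MEM_ALIGN_SIZE(sizeof(helper))
 *   |                              |
 *   v                              v
 *   [ struct memp_malloc_helper ][ user data: 'size' bytes, 0xcd fill up to the
 *     (poolnr, size)              pool element size when MEMP_OVERFLOW_CHECK ]
 *
 * mem_free() below reverses this pointer arithmetic to recover the helper and
 * therefore the pool (element->poolnr) the element has to be returned to.
 */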
00214 
00215 /**
00216  * Free memory previously allocated by mem_malloc. Loads the pool number
00217  * and calls memp_free with that pool number to put the element back into
00218  * its pool
00219  *
00220  * @param rmem the memory element to free
00221  */
00222 void
00223 mem_free(void *rmem)
00224 {
00225   struct memp_malloc_helper *hmem;
00226 
00227   LWIP_ASSERT("rmem != NULL", (rmem != NULL));
00228   LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
00229 
00230   /* get the original struct memp_malloc_helper */
00231   /* cast through void* to get rid of alignment warnings */
00232   hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
00233 
00234   LWIP_ASSERT("hmem != NULL", (hmem != NULL));
00235   LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
00236   LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
00237 
00238   MEM_STATS_DEC_USED(used, hmem->size);
00239 #if MEMP_OVERFLOW_CHECK
00240   {
00241      u16_t i;
00242      LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
00243         hmem->size <= memp_pools[hmem->poolnr]->size);
00244      /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
00245      for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
00246         u8_t data = *((u8_t*)rmem + i);
00247         LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
00248      }
00249   }
00250 #endif /* MEMP_OVERFLOW_CHECK */
00251 
00252   /* and put it in the pool we saved earlier */
00253   memp_free(hmem->poolnr, hmem);
00254 }
00255 
00256 #else /* MEM_USE_POOLS */
00257 /* lwIP replacement for your libc malloc() */
00258 
00259 /**
00260  * The heap is made up as a list of structs of this type.
00261  * This does not have to be aligned since for getting its size,
00262  * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
00263  */
00264 struct mem {
00265   /** index (-> ram[next]) of the next struct */
00266   mem_size_t next;
00267   /** index (-> ram[prev]) of the previous struct */
00268   mem_size_t prev;
00269   /** 1: this area is used; 0: this area is unused */
00270   u8_t used;
00271 };
00272 
00273 /** All allocated blocks will be MIN_SIZE bytes big, at least!
00274  * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
00275  * larger values help prevent very small blocks from fragmenting the RAM too much. */
00276 #ifndef MIN_SIZE
00277 #define MIN_SIZE             12
00278 #endif /* MIN_SIZE */
00279 /* some alignment macros: we define them here for better source code layout */
00280 #define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
00281 #define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
00282 #define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
00283 
00284 /** If you want to relocate the heap to external memory, simply define
00285  * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
00286  * If so, make sure the memory at that location is big enough (see below on
00287  * how that space is calculated). */
00288 #ifndef LWIP_RAM_HEAP_POINTER
00289 /** the heap. we need one struct mem at the end and some room for alignment */
00290 LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U*SIZEOF_STRUCT_MEM));
00291 #define LWIP_RAM_HEAP_POINTER ram_heap
00292 #endif /* LWIP_RAM_HEAP_POINTER */
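/* Example (assumption, not part of this port): relocating the heap to external
 * memory by overriding LWIP_RAM_HEAP_POINTER from lwipopts.h. The address below
 * is only a placeholder and depends on the board's memory map and linker script.
 *
 *   // lwipopts.h
 *   #define LWIP_RAM_HEAP_POINTER  ((void *)0xC0000000)   // e.g. start of external SDRAM
 *
 * The region it points to must provide at least
 * MEM_SIZE_ALIGNED + 2 * SIZEOF_STRUCT_MEM bytes plus room for MEM_ALIGNMENT,
 * mirroring the ram_heap declaration above.
 */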
00293 
00294 /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
00295 static u8_t *ram;
00296 /** the last entry, always unused! */
00297 static struct mem *ram_end;
00298 /** pointer to the lowest free block, this is used for faster search */
00299 static struct mem *lfree;
00300 
00301 /** concurrent access protection */
00302 #if !NO_SYS
00303 static sys_mutex_t mem_mutex;
00304 #endif
00305 
00306 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00307 
00308 static volatile u8_t mem_free_count;
00309 
00310 /* Allow mem_free from other (e.g. interrupt) context */
00311 #define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
00312 #define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
00313 #define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
00314 #define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
00315 #define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
00316 #define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)
00317 
00318 #else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00319 
00320 /* Protect the heap only by using a mutex */
00321 #define LWIP_MEM_FREE_DECL_PROTECT()
00322 #define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
00323 #define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
00324 /* mem_malloc is protected using the mutex AND LWIP_MEM_ALLOC_PROTECT */
00325 #define LWIP_MEM_ALLOC_DECL_PROTECT()
00326 #define LWIP_MEM_ALLOC_PROTECT()
00327 #define LWIP_MEM_ALLOC_UNPROTECT()
00328 
00329 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
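/* Illustrative note: with LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT enabled, the
 * free/trim path above is guarded by SYS_ARCH_PROTECT()/SYS_ARCH_UNPROTECT()
 * critical sections instead of mem_mutex, so mem_free()/mem_trim() may be called
 * from a context that cannot block on the mutex (e.g. an interrupt handler):
 *
 *   // lwipopts.h
 *   #define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT  1
 *
 * How SYS_ARCH_PROTECT() is implemented (typically by masking interrupts) is
 * defined by the port's sys_arch layer and is not shown here.
 */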
00330 
00331 
00332 /**
00333  * "Plug holes" by combining adjacent empty struct mems.
00334  * After this function is through, there should not exist
00335  * one empty struct mem pointing to another empty struct mem.
00336  *
00337  * @param mem this points to a struct mem which just has been freed
00338  * @internal this function is only called by mem_free() and mem_trim()
00339  *
00340  * This assumes access to the heap is protected by the calling function
00341  * already.
00342  */
00343 static void
00344 plug_holes(struct mem *mem)
00345 {
00346   struct mem *nmem;
00347   struct mem *pmem;
00348 
00349   LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
00350   LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
00351   LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
00352 
00353   /* plug hole forward */
00354   LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
00355 
00356   nmem = (struct mem *)(void *)&ram[mem->next];
00357   if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
00358     /* if mem->next is unused and not end of ram, combine mem and mem->next */
00359     if (lfree == nmem) {
00360       lfree = mem;
00361     }
00362     mem->next = nmem->next;
00363     ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
00364   }
00365 
00366   /* plug hole backward */
00367   pmem = (struct mem *)(void *)&ram[mem->prev];
00368   if (pmem != mem && pmem->used == 0) {
00369     /* if mem->prev is unused, combine mem and mem->prev */
00370     if (lfree == mem) {
00371       lfree = pmem;
00372     }
00373     pmem->next = mem->next;
00374     ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
00375   }
00376 }
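/* Illustrative before/after picture for plug_holes() (block sizes arbitrary):
 *
 *   before:  [ mem: used=0 ][ nmem: used=0 ][ X: used=1 ]
 *   after :  [ mem: used=0, next now points at X        ][ X: used=1 ]
 *
 * The same merge runs backwards via pmem when mem->prev is also unused, so at
 * most one contiguous free region remains around the block that was just freed.
 */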
00377 
00378 /**
00379  * Initialize the heap: set up its start and end and the lowest-free pointer
00380  */
00381 void
00382 mem_init(void)
00383 {
00384   struct mem *mem;
00385 
00386   LWIP_ASSERT("Sanity check alignment",
00387     (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
00388 
00389   /* align the heap */
00390   ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
00391   /* initialize the start of the heap */
00392   mem = (struct mem *)(void *)ram;
00393   mem->next = MEM_SIZE_ALIGNED;
00394   mem->prev = 0;
00395   mem->used = 0;
00396   /* initialize the end of the heap */
00397   ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
00398   ram_end->used = 1;
00399   ram_end->next = MEM_SIZE_ALIGNED;
00400   ram_end->prev = MEM_SIZE_ALIGNED;
00401 
00402   /* initialize the lowest-free pointer to the start of the heap */
00403   lfree = (struct mem *)(void *)ram;
00404 
00405   MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
00406 
00407   if (sys_mutex_new(&mem_mutex) != ERR_OK) {
00408     LWIP_ASSERT("failed to create mem_mutex", 0);
00409   }
00410 }
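/* Usage note (sketch): applications normally do not call mem_init() directly.
 * It is run from lwip_init() (which tcpip_init() calls in an OS build) together
 * with the other module initializers, before the first mem_malloc():
 *
 *   lwip_init();                          // NO_SYS build
 *   // or, with an operating system:
 *   // tcpip_init(my_init_done_cb, NULL); // my_init_done_cb is an application callback
 */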
00411 
00412 /**
00413  * Put a struct mem back on the heap
00414  *
00415  * @param rmem is the data portion of a struct mem as returned by a previous
00416  *             call to mem_malloc()
00417  */
00418 void
00419 mem_free(void *rmem)
00420 {
00421   struct mem *mem;
00422   LWIP_MEM_FREE_DECL_PROTECT();
00423 
00424   if (rmem == NULL) {
00425     LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
00426     return;
00427   }
00428   LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
00429 
00430   LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
00431     (u8_t *)rmem < (u8_t *)ram_end);
00432 
00433   if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
00434     SYS_ARCH_DECL_PROTECT(lev);
00435     LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
00436     /* protect mem stats from concurrent access */
00437     SYS_ARCH_PROTECT(lev);
00438     MEM_STATS_INC(illegal);
00439     SYS_ARCH_UNPROTECT(lev);
00440     return;
00441   }
00442   /* protect the heap from concurrent access */
00443   LWIP_MEM_FREE_PROTECT();
00444   /* Get the corresponding struct mem ... */
00445   /* cast through void* to get rid of alignment warnings */
00446   mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
00447   /* ... which has to be in a used state ... */
00448   LWIP_ASSERT("mem_free: mem->used", mem->used);
00449   /* ... and is now unused. */
00450   mem->used = 0;
00451 
00452   if (mem < lfree) {
00453     /* the newly freed struct is now the lowest */
00454     lfree = mem;
00455   }
00456 
00457   MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));
00458 
00459   /* finally, see if prev or next are free also */
00460   plug_holes(mem);
00461 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00462   mem_free_count = 1;
00463 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00464   LWIP_MEM_FREE_UNPROTECT();
00465 }
00466 
00467 /**
00468  * Shrink memory returned by mem_malloc().
00469  *
00470  * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
00471  * @param newsize required size after shrinking (needs to be smaller than or
00472  *                equal to the previous size)
00473  * @return for compatibility reasons: always == rmem at the moment,
00474  *         or NULL if newsize is > old size, in which case rmem is NOT touched
00475  *         or freed!
00476  */
00477 void *
00478 mem_trim(void *rmem, mem_size_t newsize)
00479 {
00480   mem_size_t size;
00481   mem_size_t ptr, ptr2;
00482   struct mem *mem, *mem2;
00483   /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
00484   LWIP_MEM_FREE_DECL_PROTECT();
00485 
00486   /* Expand the size of the allocated memory region so that we can
00487      adjust for alignment. */
00488   newsize = LWIP_MEM_ALIGN_SIZE(newsize);
00489 
00490   if (newsize < MIN_SIZE_ALIGNED) {
00491     /* every data block must be at least MIN_SIZE_ALIGNED long */
00492     newsize = MIN_SIZE_ALIGNED;
00493   }
00494 
00495   if (newsize > MEM_SIZE_ALIGNED) {
00496     return NULL;
00497   }
00498 
00499   LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
00500    (u8_t *)rmem < (u8_t *)ram_end);
00501 
00502   if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
00503     SYS_ARCH_DECL_PROTECT(lev);
00504     LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
00505     /* protect mem stats from concurrent access */
00506     SYS_ARCH_PROTECT(lev);
00507     MEM_STATS_INC(illegal);
00508     SYS_ARCH_UNPROTECT(lev);
00509     return rmem;
00510   }
00511   /* Get the corresponding struct mem ... */
00512   /* cast through void* to get rid of alignment warnings */
00513   mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
00514   /* ... and its offset pointer */
00515   ptr = (mem_size_t)((u8_t *)mem - ram);
00516 
00517   size = mem->next - ptr - SIZEOF_STRUCT_MEM;
00518   LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
00519   if (newsize > size) {
00520     /* not supported */
00521     return NULL;
00522   }
00523   if (newsize == size) {
00524     /* No change in size, simply return */
00525     return rmem;
00526   }
00527 
00528   /* protect the heap from concurrent access */
00529   LWIP_MEM_FREE_PROTECT();
00530 
00531   mem2 = (struct mem *)(void *)&ram[mem->next];
00532   if (mem2->used == 0) {
00533     /* The next struct is unused, we can simply move it a little */
00534     mem_size_t next;
00535     /* remember the old next pointer */
00536     next = mem2->next;
00537     /* create new struct mem which is moved directly after the shrunk mem */
00538     ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
00539     if (lfree == mem2) {
00540       lfree = (struct mem *)(void *)&ram[ptr2];
00541     }
00542     mem2 = (struct mem *)(void *)&ram[ptr2];
00543     mem2->used = 0;
00544     /* restore the next pointer */
00545     mem2->next = next;
00546     /* link it back to mem */
00547     mem2->prev = ptr;
00548     /* link mem to it */
00549     mem->next = ptr2;
00550     /* last thing to restore linked list: as we have moved mem2,
00551      * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
00552      * the end of the heap */
00553     if (mem2->next != MEM_SIZE_ALIGNED) {
00554       ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
00555     }
00556     MEM_STATS_DEC_USED(used, (size - newsize));
00557     /* no need to plug holes, we've already done that */
00558   } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
00559     /* Next struct is used but there's room for another struct mem with
00560      * at least MIN_SIZE_ALIGNED of data.
00561      * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
00562      * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
00563      * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
00564      *       region that couldn't hold data, but when mem->next gets freed,
00565      *       the 2 regions would be combined, resulting in more free memory */
00566     ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
00567     mem2 = (struct mem *)(void *)&ram[ptr2];
00568     if (mem2 < lfree) {
00569       lfree = mem2;
00570     }
00571     mem2->used = 0;
00572     mem2->next = mem->next;
00573     mem2->prev = ptr;
00574     mem->next = ptr2;
00575     if (mem2->next != MEM_SIZE_ALIGNED) {
00576       ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
00577     }
00578     MEM_STATS_DEC_USED(used, (size - newsize));
00579     /* the original mem->next is used, so no need to plug holes! */
00580   }
00581   /* else {
00582     next struct mem is used but size between mem and mem2 is not big enough
00583     to create another struct mem
00584     -> don't do anything.
00585     -> the remaining space stays unused since it is too small
00586   } */
00587 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00588   mem_free_count = 1;
00589 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00590   LWIP_MEM_FREE_UNPROTECT();
00591   return rmem;
00592 }
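/* Usage sketch (illustrative; sizes and the fill_buffer() helper are made up):
 * mem_trim() only shrinks in place and currently returns the same pointer, so
 * the result can simply replace the original pointer.
 *
 *   u8_t *buf = (u8_t *)mem_malloc(256);
 *   if (buf != NULL) {
 *     mem_size_t used = fill_buffer(buf, 256);   // hypothetical helper
 *     buf = (u8_t *)mem_trim(buf, used);         // hand the unused tail back to the heap
 *     // ... use buf ...
 *     mem_free(buf);
 *   }
 */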
00593 
00594 /**
00595  * Allocate a block of memory with a minimum of 'size' bytes.
00596  *
00597  * @param size is the minimum size of the requested block in bytes.
00598  * @return pointer to allocated memory or NULL if no free memory was found.
00599  *
00600  * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
00601  */
00602 void *
00603 mem_malloc(mem_size_t size)
00604 {
00605   mem_size_t ptr, ptr2;
00606   struct mem *mem, *mem2;
00607 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00608   u8_t local_mem_free_count = 0;
00609 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00610   LWIP_MEM_ALLOC_DECL_PROTECT();
00611 
00612   if (size == 0) {
00613     return NULL;
00614   }
00615 
00616   /* Expand the size of the allocated memory region so that we can
00617      adjust for alignment. */
00618   size = LWIP_MEM_ALIGN_SIZE(size);
00619 
00620   if (size < MIN_SIZE_ALIGNED) {
00621     /* every data block must be at least MIN_SIZE_ALIGNED long */
00622     size = MIN_SIZE_ALIGNED;
00623   }
00624 
00625   if (size > MEM_SIZE_ALIGNED) {
00626     return NULL;
00627   }
00628 
00629   /* protect the heap from concurrent access */
00630   sys_mutex_lock(&mem_mutex);
00631   LWIP_MEM_ALLOC_PROTECT();
00632 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00633   /* run as long as a mem_free disturbed mem_malloc or mem_trim */
00634   do {
00635     local_mem_free_count = 0;
00636 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00637 
00638     /* Scan through the heap searching for a free block that is big enough,
00639      * beginning with the lowest free block.
00640      */
00641     for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
00642          ptr = ((struct mem *)(void *)&ram[ptr])->next) {
00643       mem = (struct mem *)(void *)&ram[ptr];
00644 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00645       mem_free_count = 0;
00646       LWIP_MEM_ALLOC_UNPROTECT();
00647       /* allow mem_free or mem_trim to run */
00648       LWIP_MEM_ALLOC_PROTECT();
00649       if (mem_free_count != 0) {
00650         /* If mem_free or mem_trim have run, we have to restart since they
00651            could have altered our current struct mem. */
00652         local_mem_free_count = 1;
00653         break;
00654       }
00655 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00656 
00657       if ((!mem->used) &&
00658           (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
00659         /* mem is not used and at least a perfect fit is possible:
00660          * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
00661 
00662         if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
00663           /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
00664            * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
00665            * -> split large block, create empty remainder,
00666            * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
00667            * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
00668            * struct mem would fit in but no data between mem2 and mem2->next
00669            * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
00670            *       region that couldn't hold data, but when mem->next gets freed,
00671            *       the 2 regions would be combined, resulting in more free memory
00672            */
00673           ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
00674           /* create mem2 struct */
00675           mem2 = (struct mem *)(void *)&ram[ptr2];
00676           mem2->used = 0;
00677           mem2->next = mem->next;
00678           mem2->prev = ptr;
00679           /* and insert it between mem and mem->next */
00680           mem->next = ptr2;
00681           mem->used = 1;
00682 
00683           if (mem2->next != MEM_SIZE_ALIGNED) {
00684             ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
00685           }
00686           MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
00687         } else {
00688           /* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
00689            * be used at this point: if not, we have 2 unused structs in a row and plug_holes should have
00690            * taken care of this).
00691            * -> near fit or exact fit: do not split, no mem2 creation
00692            * also can't move mem->next directly behind mem, since mem->next
00693            * will always be used at this point!
00694            */
00695           mem->used = 1;
00696           MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
00697         }
00698 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00699 mem_malloc_adjust_lfree:
00700 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00701         if (mem == lfree) {
00702           struct mem *cur = lfree;
00703           /* Find next free block after mem and update lowest free pointer */
00704           while (cur->used && cur != ram_end) {
00705 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00706             mem_free_count = 0;
00707             LWIP_MEM_ALLOC_UNPROTECT();
00708             /* prevent high interrupt latency... */
00709             LWIP_MEM_ALLOC_PROTECT();
00710             if (mem_free_count != 0) {
00711               /* If mem_free or mem_trim have run, we have to restart since they
00712                  could have altered our current struct mem or lfree. */
00713               goto mem_malloc_adjust_lfree;
00714             }
00715 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00716             cur = (struct mem *)(void *)&ram[cur->next];
00717           }
00718           lfree = cur;
00719           LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
00720         }
00721         LWIP_MEM_ALLOC_UNPROTECT();
00722         sys_mutex_unlock(&mem_mutex);
00723         LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
00724          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
00725         LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
00726          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
00727         LWIP_ASSERT("mem_malloc: sanity check alignment",
00728           (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
00729 
00730         return (u8_t *)mem + SIZEOF_STRUCT_MEM;
00731       }
00732     }
00733 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
00734     /* if we got interrupted by a mem_free, try again */
00735   } while (local_mem_free_count != 0);
00736 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
00737   LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
00738   MEM_STATS_INC(err);
00739   LWIP_MEM_ALLOC_UNPROTECT();
00740   sys_mutex_unlock(&mem_mutex);
00741   return NULL;
00742 }
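/* Usage sketch (illustrative; struct my_record is a hypothetical type): every
 * successful mem_malloc() must be matched by exactly one mem_free(). On failure
 * mem_malloc() returns NULL and has already counted the error in MEM_STATS.
 *
 *   struct my_record *r = (struct my_record *)mem_malloc(sizeof(struct my_record));
 *   if (r == NULL) {
 *     return ERR_MEM;   // out of heap memory
 *   }
 *   // ... use r ...
 *   mem_free(r);
 */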
00743 
00744 #endif /* MEM_USE_POOLS */
00745 
00746 #if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
00747 void *
00748 mem_calloc(mem_size_t count, mem_size_t size)
00749 {
00750   return mem_clib_calloc(count, size);
00751 }
00752 
00753 #else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
00754 /**
00755  * Contiguously allocates enough space for count objects that are size bytes
00756  * of memory each and returns a pointer to the allocated memory.
00757  *
00758  * The allocated memory is filled with bytes of value zero.
00759  *
00760  * @param count number of objects to allocate
00761  * @param size size of the objects to allocate
00762  * @return pointer to allocated memory / NULL pointer if there is an error
00763  */
00764 void *
00765 mem_calloc(mem_size_t count, mem_size_t size)
00766 {
00767   void *p;
00768 
00769   /* allocate 'count' objects of size 'size' */
00770   p = mem_malloc(count * size);
00771   if (p) {
00772     /* zero the memory */
00773     memset(p, 0, (size_t)count * (size_t)size);
00774   }
00775   return p;
00776 }
00777 #endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
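/* Usage sketch (illustrative): mem_calloc() zero-fills the block, which is handy
 * for tables of records. Note that the 'count * size' multiplication is not
 * checked for overflow here, so the caller must ensure the product fits in
 * mem_size_t.
 *
 *   u16_t *table = (u16_t *)mem_calloc(32, sizeof(u16_t));
 *   if (table != NULL) {
 *     // all 32 entries start out as 0
 *     mem_free(table);
 *   }
 */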