Official mbed lwIP library (version 1.4.0)

Dependents:   LwIPNetworking NetServicesMin EthernetInterface EthernetInterface_RSF ... more

Legacy Networking Libraries

This is an mbed 2 networking library. In mbed OS 5, lwIP has been integrated with built-in networking interfaces, and the networking libraries have been revised to better support additional network stacks and thread safety.

This library is based on the code of lwIP v1.4.0

Copyright (c) 2001, 2002 Swedish Institute of Computer Science.
All rights reserved. 

Redistribution and use in source and binary forms, with or without modification, 
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
   derived from this software without specific prior written permission. 

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 
SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 
OF SUCH DAMAGE.
Committer:
mbed_official
Date:
Fri Jun 22 09:25:39 2012 +0000
Revision:
0:51ac1d130fd4
Child:
2:fcd6ac34b3f8
Initial import from lwip-1.4.0: http://download.savannah.gnu.org/releases/lwip/lwip-1.4.0.zip

Who changed what in which revision?

UserRevisionLine numberNew contents of line
mbed_official 0:51ac1d130fd4 1 /**
mbed_official 0:51ac1d130fd4 2 * @file
mbed_official 0:51ac1d130fd4 3 * Dynamic memory manager
mbed_official 0:51ac1d130fd4 4 *
mbed_official 0:51ac1d130fd4 5 * This is a lightweight replacement for the standard C library malloc().
mbed_official 0:51ac1d130fd4 6 *
mbed_official 0:51ac1d130fd4 7 * If you want to use the standard C library malloc() instead, define
mbed_official 0:51ac1d130fd4 8 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
mbed_official 0:51ac1d130fd4 9 *
mbed_official 0:51ac1d130fd4 10 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
mbed_official 0:51ac1d130fd4 11 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
mbed_official 0:51ac1d130fd4 12 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
mbed_official 0:51ac1d130fd4 13 * of pools like this (more pools can be added between _START and _END):
mbed_official 0:51ac1d130fd4 14 *
mbed_official 0:51ac1d130fd4 15 * Define three pools with sizes 256, 512, and 1512 bytes
mbed_official 0:51ac1d130fd4 16 * LWIP_MALLOC_MEMPOOL_START
mbed_official 0:51ac1d130fd4 17 * LWIP_MALLOC_MEMPOOL(20, 256)
mbed_official 0:51ac1d130fd4 18 * LWIP_MALLOC_MEMPOOL(10, 512)
mbed_official 0:51ac1d130fd4 19 * LWIP_MALLOC_MEMPOOL(5, 1512)
mbed_official 0:51ac1d130fd4 20 * LWIP_MALLOC_MEMPOOL_END
mbed_official 0:51ac1d130fd4 21 */
mbed_official 0:51ac1d130fd4 22
mbed_official 0:51ac1d130fd4 23 /*
mbed_official 0:51ac1d130fd4 24 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
mbed_official 0:51ac1d130fd4 25 * All rights reserved.
mbed_official 0:51ac1d130fd4 26 *
mbed_official 0:51ac1d130fd4 27 * Redistribution and use in source and binary forms, with or without modification,
mbed_official 0:51ac1d130fd4 28 * are permitted provided that the following conditions are met:
mbed_official 0:51ac1d130fd4 29 *
mbed_official 0:51ac1d130fd4 30 * 1. Redistributions of source code must retain the above copyright notice,
mbed_official 0:51ac1d130fd4 31 * this list of conditions and the following disclaimer.
mbed_official 0:51ac1d130fd4 32 * 2. Redistributions in binary form must reproduce the above copyright notice,
mbed_official 0:51ac1d130fd4 33 * this list of conditions and the following disclaimer in the documentation
mbed_official 0:51ac1d130fd4 34 * and/or other materials provided with the distribution.
mbed_official 0:51ac1d130fd4 35 * 3. The name of the author may not be used to endorse or promote products
mbed_official 0:51ac1d130fd4 36 * derived from this software without specific prior written permission.
mbed_official 0:51ac1d130fd4 37 *
mbed_official 0:51ac1d130fd4 38 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
mbed_official 0:51ac1d130fd4 39 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
mbed_official 0:51ac1d130fd4 40 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
mbed_official 0:51ac1d130fd4 41 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
mbed_official 0:51ac1d130fd4 42 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
mbed_official 0:51ac1d130fd4 43 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
mbed_official 0:51ac1d130fd4 44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
mbed_official 0:51ac1d130fd4 45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
mbed_official 0:51ac1d130fd4 46 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
mbed_official 0:51ac1d130fd4 47 * OF SUCH DAMAGE.
mbed_official 0:51ac1d130fd4 48 *
mbed_official 0:51ac1d130fd4 49 * This file is part of the lwIP TCP/IP stack.
mbed_official 0:51ac1d130fd4 50 *
mbed_official 0:51ac1d130fd4 51 * Author: Adam Dunkels <adam@sics.se>
mbed_official 0:51ac1d130fd4 52 * Simon Goldschmidt
mbed_official 0:51ac1d130fd4 53 *
mbed_official 0:51ac1d130fd4 54 */
mbed_official 0:51ac1d130fd4 55
mbed_official 0:51ac1d130fd4 56 #include "lwip/opt.h"
mbed_official 0:51ac1d130fd4 57
mbed_official 0:51ac1d130fd4 58 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
mbed_official 0:51ac1d130fd4 59
mbed_official 0:51ac1d130fd4 60 #include "lwip/def.h"
mbed_official 0:51ac1d130fd4 61 #include "lwip/mem.h"
mbed_official 0:51ac1d130fd4 62 #include "lwip/sys.h"
mbed_official 0:51ac1d130fd4 63 #include "lwip/stats.h"
mbed_official 0:51ac1d130fd4 64 #include "lwip/err.h"
mbed_official 0:51ac1d130fd4 65
mbed_official 0:51ac1d130fd4 66 #include <string.h>
mbed_official 0:51ac1d130fd4 67
mbed_official 0:51ac1d130fd4 68 #if MEM_USE_POOLS
mbed_official 0:51ac1d130fd4 69 /* lwIP head implemented with different sized pools */
mbed_official 0:51ac1d130fd4 70
mbed_official 0:51ac1d130fd4 71 /**
mbed_official 0:51ac1d130fd4 72 * Allocate memory: determine the smallest pool that is big enough
mbed_official 0:51ac1d130fd4 73 * to contain an element of 'size' and get an element from that pool.
mbed_official 0:51ac1d130fd4 74 *
mbed_official 0:51ac1d130fd4 75 * @param size the size in bytes of the memory needed
mbed_official 0:51ac1d130fd4 76 * @return a pointer to the allocated memory or NULL if the pool is empty
mbed_official 0:51ac1d130fd4 77 */
mbed_official 0:51ac1d130fd4 78 void *
mbed_official 0:51ac1d130fd4 79 mem_malloc(mem_size_t size)
mbed_official 0:51ac1d130fd4 80 {
mbed_official 0:51ac1d130fd4 81 struct memp_malloc_helper *element;
mbed_official 0:51ac1d130fd4 82 memp_t poolnr;
mbed_official 0:51ac1d130fd4 83 mem_size_t required_size = size + sizeof(struct memp_malloc_helper);
mbed_official 0:51ac1d130fd4 84
mbed_official 0:51ac1d130fd4 85 for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
mbed_official 0:51ac1d130fd4 86 #if MEM_USE_POOLS_TRY_BIGGER_POOL
mbed_official 0:51ac1d130fd4 87 again:
mbed_official 0:51ac1d130fd4 88 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
mbed_official 0:51ac1d130fd4 89 /* is this pool big enough to hold an element of the required size
mbed_official 0:51ac1d130fd4 90 plus a struct memp_malloc_helper that saves the pool this element came from? */
mbed_official 0:51ac1d130fd4 91 if (required_size <= memp_sizes[poolnr]) {
mbed_official 0:51ac1d130fd4 92 break;
mbed_official 0:51ac1d130fd4 93 }
mbed_official 0:51ac1d130fd4 94 }
mbed_official 0:51ac1d130fd4 95 if (poolnr > MEMP_POOL_LAST) {
mbed_official 0:51ac1d130fd4 96 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
mbed_official 0:51ac1d130fd4 97 return NULL;
mbed_official 0:51ac1d130fd4 98 }
mbed_official 0:51ac1d130fd4 99 element = (struct memp_malloc_helper*)memp_malloc(poolnr);
mbed_official 0:51ac1d130fd4 100 if (element == NULL) {
mbed_official 0:51ac1d130fd4 101 /* No need to DEBUGF or ASSERT: This error is already
mbed_official 0:51ac1d130fd4 102 taken care of in memp.c */
mbed_official 0:51ac1d130fd4 103 #if MEM_USE_POOLS_TRY_BIGGER_POOL
mbed_official 0:51ac1d130fd4 104 /** Try a bigger pool if this one is empty! */
mbed_official 0:51ac1d130fd4 105 if (poolnr < MEMP_POOL_LAST) {
mbed_official 0:51ac1d130fd4 106 poolnr++;
mbed_official 0:51ac1d130fd4 107 goto again;
mbed_official 0:51ac1d130fd4 108 }
mbed_official 0:51ac1d130fd4 109 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
mbed_official 0:51ac1d130fd4 110 return NULL;
mbed_official 0:51ac1d130fd4 111 }
mbed_official 0:51ac1d130fd4 112
mbed_official 0:51ac1d130fd4 113 /* save the pool number this element came from */
mbed_official 0:51ac1d130fd4 114 element->poolnr = poolnr;
mbed_official 0:51ac1d130fd4 115 /* and return a pointer to the memory directly after the struct memp_malloc_helper */
mbed_official 0:51ac1d130fd4 116 element++;
mbed_official 0:51ac1d130fd4 117
mbed_official 0:51ac1d130fd4 118 return element;
mbed_official 0:51ac1d130fd4 119 }
mbed_official 0:51ac1d130fd4 120
mbed_official 0:51ac1d130fd4 121 /**
mbed_official 0:51ac1d130fd4 122 * Free memory previously allocated by mem_malloc. Loads the pool number
mbed_official 0:51ac1d130fd4 123 * and calls memp_free with that pool number to put the element back into
mbed_official 0:51ac1d130fd4 124 * its pool
mbed_official 0:51ac1d130fd4 125 *
mbed_official 0:51ac1d130fd4 126 * @param rmem the memory element to free
mbed_official 0:51ac1d130fd4 127 */
mbed_official 0:51ac1d130fd4 128 void
mbed_official 0:51ac1d130fd4 129 mem_free(void *rmem)
mbed_official 0:51ac1d130fd4 130 {
mbed_official 0:51ac1d130fd4 131 struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;
mbed_official 0:51ac1d130fd4 132
mbed_official 0:51ac1d130fd4 133 LWIP_ASSERT("rmem != NULL", (rmem != NULL));
mbed_official 0:51ac1d130fd4 134 LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
mbed_official 0:51ac1d130fd4 135
mbed_official 0:51ac1d130fd4 136 /* get the original struct memp_malloc_helper */
mbed_official 0:51ac1d130fd4 137 hmem--;
mbed_official 0:51ac1d130fd4 138
mbed_official 0:51ac1d130fd4 139 LWIP_ASSERT("hmem != NULL", (hmem != NULL));
mbed_official 0:51ac1d130fd4 140 LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
mbed_official 0:51ac1d130fd4 141 LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
mbed_official 0:51ac1d130fd4 142
mbed_official 0:51ac1d130fd4 143 /* and put it in the pool we saved earlier */
mbed_official 0:51ac1d130fd4 144 memp_free(hmem->poolnr, hmem);
mbed_official 0:51ac1d130fd4 145 }
mbed_official 0:51ac1d130fd4 146
mbed_official 0:51ac1d130fd4 147 #else /* MEM_USE_POOLS */
mbed_official 0:51ac1d130fd4 148 /* lwIP replacement for your libc malloc() */
mbed_official 0:51ac1d130fd4 149
/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
mbed_official 0:51ac1d130fd4 163
mbed_official 0:51ac1d130fd4 164 /** All allocated blocks will be MIN_SIZE bytes big, at least!
mbed_official 0:51ac1d130fd4 165 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
mbed_official 0:51ac1d130fd4 166 * larger values could prevent too small blocks to fragment the RAM too much. */
mbed_official 0:51ac1d130fd4 167 #ifndef MIN_SIZE
mbed_official 0:51ac1d130fd4 168 #define MIN_SIZE 12
mbed_official 0:51ac1d130fd4 169 #endif /* MIN_SIZE */
mbed_official 0:51ac1d130fd4 170 /* some alignment macros: we define them here for better source code layout */
mbed_official 0:51ac1d130fd4 171 #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
mbed_official 0:51ac1d130fd4 172 #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
mbed_official 0:51ac1d130fd4 173 #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
mbed_official 0:51ac1d130fd4 174
mbed_official 0:51ac1d130fd4 175 /** If you want to relocate the heap to external memory, simply define
mbed_official 0:51ac1d130fd4 176 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
mbed_official 0:51ac1d130fd4 177 * If so, make sure the memory at that location is big enough (see below on
mbed_official 0:51ac1d130fd4 178 * how that space is calculated). */
mbed_official 0:51ac1d130fd4 179 #ifndef LWIP_RAM_HEAP_POINTER
mbed_official 0:51ac1d130fd4 180 /** the heap. we need one struct mem at the end and some room for alignment */
mbed_official 0:51ac1d130fd4 181 u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
mbed_official 0:51ac1d130fd4 182 #define LWIP_RAM_HEAP_POINTER ram_heap
mbed_official 0:51ac1d130fd4 183 #endif /* LWIP_RAM_HEAP_POINTER */
mbed_official 0:51ac1d130fd4 184
mbed_official 0:51ac1d130fd4 185 /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
mbed_official 0:51ac1d130fd4 186 static u8_t *ram;
mbed_official 0:51ac1d130fd4 187 /** the last entry, always unused! */
mbed_official 0:51ac1d130fd4 188 static struct mem *ram_end;
mbed_official 0:51ac1d130fd4 189 /** pointer to the lowest free block, this is used for faster search */
mbed_official 0:51ac1d130fd4 190 static struct mem *lfree;
mbed_official 0:51ac1d130fd4 191
mbed_official 0:51ac1d130fd4 192 /** concurrent access protection */
mbed_official 0:51ac1d130fd4 193 static sys_mutex_t mem_mutex;
mbed_official 0:51ac1d130fd4 194
mbed_official 0:51ac1d130fd4 195 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 196
mbed_official 0:51ac1d130fd4 197 static volatile u8_t mem_free_count;
mbed_official 0:51ac1d130fd4 198
mbed_official 0:51ac1d130fd4 199 /* Allow mem_free from other (e.g. interrupt) context */
mbed_official 0:51ac1d130fd4 200 #define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
mbed_official 0:51ac1d130fd4 201 #define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
mbed_official 0:51ac1d130fd4 202 #define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
mbed_official 0:51ac1d130fd4 203 #define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
mbed_official 0:51ac1d130fd4 204 #define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
mbed_official 0:51ac1d130fd4 205 #define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)
mbed_official 0:51ac1d130fd4 206
mbed_official 0:51ac1d130fd4 207 #else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 208
mbed_official 0:51ac1d130fd4 209 /* Protect the heap only by using a semaphore */
mbed_official 0:51ac1d130fd4 210 #define LWIP_MEM_FREE_DECL_PROTECT()
mbed_official 0:51ac1d130fd4 211 #define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex)
mbed_official 0:51ac1d130fd4 212 #define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
mbed_official 0:51ac1d130fd4 213 /* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
mbed_official 0:51ac1d130fd4 214 #define LWIP_MEM_ALLOC_DECL_PROTECT()
mbed_official 0:51ac1d130fd4 215 #define LWIP_MEM_ALLOC_PROTECT()
mbed_official 0:51ac1d130fd4 216 #define LWIP_MEM_ALLOC_UNPROTECT()
mbed_official 0:51ac1d130fd4 217
mbed_official 0:51ac1d130fd4 218 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 219
mbed_official 0:51ac1d130fd4 220
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      /* the absorbed block was the lowest free one; 'mem' takes its place */
      lfree = mem;
    }
    mem->next = nmem->next;
    /* relink the block after the absorbed one back to 'mem' */
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    /* relink the block after 'mem' back to the enlarged previous block */
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
mbed_official 0:51ac1d130fd4 266
mbed_official 0:51ac1d130fd4 267 /**
mbed_official 0:51ac1d130fd4 268 * Zero the heap and initialize start, end and lowest-free
mbed_official 0:51ac1d130fd4 269 */
mbed_official 0:51ac1d130fd4 270 void
mbed_official 0:51ac1d130fd4 271 mem_init(void)
mbed_official 0:51ac1d130fd4 272 {
mbed_official 0:51ac1d130fd4 273 struct mem *mem;
mbed_official 0:51ac1d130fd4 274
mbed_official 0:51ac1d130fd4 275 LWIP_ASSERT("Sanity check alignment",
mbed_official 0:51ac1d130fd4 276 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
mbed_official 0:51ac1d130fd4 277
mbed_official 0:51ac1d130fd4 278 /* align the heap */
mbed_official 0:51ac1d130fd4 279 ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
mbed_official 0:51ac1d130fd4 280 /* initialize the start of the heap */
mbed_official 0:51ac1d130fd4 281 mem = (struct mem *)(void *)ram;
mbed_official 0:51ac1d130fd4 282 mem->next = MEM_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 283 mem->prev = 0;
mbed_official 0:51ac1d130fd4 284 mem->used = 0;
mbed_official 0:51ac1d130fd4 285 /* initialize the end of the heap */
mbed_official 0:51ac1d130fd4 286 ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
mbed_official 0:51ac1d130fd4 287 ram_end->used = 1;
mbed_official 0:51ac1d130fd4 288 ram_end->next = MEM_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 289 ram_end->prev = MEM_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 290
mbed_official 0:51ac1d130fd4 291 /* initialize the lowest-free pointer to the start of the heap */
mbed_official 0:51ac1d130fd4 292 lfree = (struct mem *)(void *)ram;
mbed_official 0:51ac1d130fd4 293
mbed_official 0:51ac1d130fd4 294 MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
mbed_official 0:51ac1d130fd4 295
mbed_official 0:51ac1d130fd4 296 if(sys_mutex_new(&mem_mutex) != ERR_OK) {
mbed_official 0:51ac1d130fd4 297 LWIP_ASSERT("failed to create mem_mutex", 0);
mbed_official 0:51ac1d130fd4 298 }
mbed_official 0:51ac1d130fd4 299 }
mbed_official 0:51ac1d130fd4 300
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  /* freeing NULL is a no-op, but worth a trace message */
  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* reject pointers that do not lie inside the heap */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem (header sits directly before the data) ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  /* block size = distance from this header to the next header */
  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* signal a concurrently-retrying mem_malloc() that memory was freed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
mbed_official 0:51ac1d130fd4 354
mbed_official 0:51ac1d130fd4 355 /**
mbed_official 0:51ac1d130fd4 356 * Shrink memory returned by mem_malloc().
mbed_official 0:51ac1d130fd4 357 *
mbed_official 0:51ac1d130fd4 358 * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked
mbed_official 0:51ac1d130fd4 359 * @param newsize required size after shrinking (needs to be smaller than or
mbed_official 0:51ac1d130fd4 360 * equal to the previous size)
mbed_official 0:51ac1d130fd4 361 * @return for compatibility reasons: is always == rmem, at the moment
mbed_official 0:51ac1d130fd4 362 * or NULL if newsize is > old size, in which case rmem is NOT touched
mbed_official 0:51ac1d130fd4 363 * or freed!
mbed_official 0:51ac1d130fd4 364 */
mbed_official 0:51ac1d130fd4 365 void *
mbed_official 0:51ac1d130fd4 366 mem_trim(void *rmem, mem_size_t newsize)
mbed_official 0:51ac1d130fd4 367 {
mbed_official 0:51ac1d130fd4 368 mem_size_t size;
mbed_official 0:51ac1d130fd4 369 mem_size_t ptr, ptr2;
mbed_official 0:51ac1d130fd4 370 struct mem *mem, *mem2;
mbed_official 0:51ac1d130fd4 371 /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
mbed_official 0:51ac1d130fd4 372 LWIP_MEM_FREE_DECL_PROTECT();
mbed_official 0:51ac1d130fd4 373
mbed_official 0:51ac1d130fd4 374 /* Expand the size of the allocated memory region so that we can
mbed_official 0:51ac1d130fd4 375 adjust for alignment. */
mbed_official 0:51ac1d130fd4 376 newsize = LWIP_MEM_ALIGN_SIZE(newsize);
mbed_official 0:51ac1d130fd4 377
mbed_official 0:51ac1d130fd4 378 if(newsize < MIN_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 379 /* every data block must be at least MIN_SIZE_ALIGNED long */
mbed_official 0:51ac1d130fd4 380 newsize = MIN_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 381 }
mbed_official 0:51ac1d130fd4 382
mbed_official 0:51ac1d130fd4 383 if (newsize > MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 384 return NULL;
mbed_official 0:51ac1d130fd4 385 }
mbed_official 0:51ac1d130fd4 386
mbed_official 0:51ac1d130fd4 387 LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
mbed_official 0:51ac1d130fd4 388 (u8_t *)rmem < (u8_t *)ram_end);
mbed_official 0:51ac1d130fd4 389
mbed_official 0:51ac1d130fd4 390 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
mbed_official 0:51ac1d130fd4 391 SYS_ARCH_DECL_PROTECT(lev);
mbed_official 0:51ac1d130fd4 392 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
mbed_official 0:51ac1d130fd4 393 /* protect mem stats from concurrent access */
mbed_official 0:51ac1d130fd4 394 SYS_ARCH_PROTECT(lev);
mbed_official 0:51ac1d130fd4 395 MEM_STATS_INC(illegal);
mbed_official 0:51ac1d130fd4 396 SYS_ARCH_UNPROTECT(lev);
mbed_official 0:51ac1d130fd4 397 return rmem;
mbed_official 0:51ac1d130fd4 398 }
mbed_official 0:51ac1d130fd4 399 /* Get the corresponding struct mem ... */
mbed_official 0:51ac1d130fd4 400 mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
mbed_official 0:51ac1d130fd4 401 /* ... and its offset pointer */
mbed_official 0:51ac1d130fd4 402 ptr = (mem_size_t)((u8_t *)mem - ram);
mbed_official 0:51ac1d130fd4 403
mbed_official 0:51ac1d130fd4 404 size = mem->next - ptr - SIZEOF_STRUCT_MEM;
mbed_official 0:51ac1d130fd4 405 LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
mbed_official 0:51ac1d130fd4 406 if (newsize > size) {
mbed_official 0:51ac1d130fd4 407 /* not supported */
mbed_official 0:51ac1d130fd4 408 return NULL;
mbed_official 0:51ac1d130fd4 409 }
mbed_official 0:51ac1d130fd4 410 if (newsize == size) {
mbed_official 0:51ac1d130fd4 411 /* No change in size, simply return */
mbed_official 0:51ac1d130fd4 412 return rmem;
mbed_official 0:51ac1d130fd4 413 }
mbed_official 0:51ac1d130fd4 414
mbed_official 0:51ac1d130fd4 415 /* protect the heap from concurrent access */
mbed_official 0:51ac1d130fd4 416 LWIP_MEM_FREE_PROTECT();
mbed_official 0:51ac1d130fd4 417
mbed_official 0:51ac1d130fd4 418 mem2 = (struct mem *)(void *)&ram[mem->next];
mbed_official 0:51ac1d130fd4 419 if(mem2->used == 0) {
mbed_official 0:51ac1d130fd4 420 /* The next struct is unused, we can simply move it at little */
mbed_official 0:51ac1d130fd4 421 mem_size_t next;
mbed_official 0:51ac1d130fd4 422 /* remember the old next pointer */
mbed_official 0:51ac1d130fd4 423 next = mem2->next;
mbed_official 0:51ac1d130fd4 424 /* create new struct mem which is moved directly after the shrinked mem */
mbed_official 0:51ac1d130fd4 425 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mbed_official 0:51ac1d130fd4 426 if (lfree == mem2) {
mbed_official 0:51ac1d130fd4 427 lfree = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 428 }
mbed_official 0:51ac1d130fd4 429 mem2 = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 430 mem2->used = 0;
mbed_official 0:51ac1d130fd4 431 /* restore the next pointer */
mbed_official 0:51ac1d130fd4 432 mem2->next = next;
mbed_official 0:51ac1d130fd4 433 /* link it back to mem */
mbed_official 0:51ac1d130fd4 434 mem2->prev = ptr;
mbed_official 0:51ac1d130fd4 435 /* link mem to it */
mbed_official 0:51ac1d130fd4 436 mem->next = ptr2;
mbed_official 0:51ac1d130fd4 437 /* last thing to restore linked list: as we have moved mem2,
mbed_official 0:51ac1d130fd4 438 * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
mbed_official 0:51ac1d130fd4 439 * the end of the heap */
mbed_official 0:51ac1d130fd4 440 if (mem2->next != MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 441 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
mbed_official 0:51ac1d130fd4 442 }
mbed_official 0:51ac1d130fd4 443 MEM_STATS_DEC_USED(used, (size - newsize));
mbed_official 0:51ac1d130fd4 444 /* no need to plug holes, we've already done that */
mbed_official 0:51ac1d130fd4 445 } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
mbed_official 0:51ac1d130fd4 446 /* Next struct is used but there's room for another struct mem with
mbed_official 0:51ac1d130fd4 447 * at least MIN_SIZE_ALIGNED of data.
mbed_official 0:51ac1d130fd4 448 * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
mbed_official 0:51ac1d130fd4 449 * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
mbed_official 0:51ac1d130fd4 450 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
mbed_official 0:51ac1d130fd4 451 * region that couldn't hold data, but when mem->next gets freed,
mbed_official 0:51ac1d130fd4 452 * the 2 regions would be combined, resulting in more free memory */
mbed_official 0:51ac1d130fd4 453 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mbed_official 0:51ac1d130fd4 454 mem2 = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 455 if (mem2 < lfree) {
mbed_official 0:51ac1d130fd4 456 lfree = mem2;
mbed_official 0:51ac1d130fd4 457 }
mbed_official 0:51ac1d130fd4 458 mem2->used = 0;
mbed_official 0:51ac1d130fd4 459 mem2->next = mem->next;
mbed_official 0:51ac1d130fd4 460 mem2->prev = ptr;
mbed_official 0:51ac1d130fd4 461 mem->next = ptr2;
mbed_official 0:51ac1d130fd4 462 if (mem2->next != MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 463 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
mbed_official 0:51ac1d130fd4 464 }
mbed_official 0:51ac1d130fd4 465 MEM_STATS_DEC_USED(used, (size - newsize));
mbed_official 0:51ac1d130fd4 466 /* the original mem->next is used, so no need to plug holes! */
mbed_official 0:51ac1d130fd4 467 }
mbed_official 0:51ac1d130fd4 468 /* else {
mbed_official 0:51ac1d130fd4 469 next struct mem is used but size between mem and mem2 is not big enough
mbed_official 0:51ac1d130fd4 470 to create another struct mem
mbed_official 0:51ac1d130fd4 471 -> don't do anything.
mbed_official 0:51ac1d130fd4 472 -> the remaining space stays unused since it is too small
mbed_official 0:51ac1d130fd4 473 } */
mbed_official 0:51ac1d130fd4 474 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 475 mem_free_count = 1;
mbed_official 0:51ac1d130fd4 476 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 477 LWIP_MEM_FREE_UNPROTECT();
mbed_official 0:51ac1d130fd4 478 return rmem;
mbed_official 0:51ac1d130fd4 479 }
mbed_official 0:51ac1d130fd4 480
mbed_official 0:51ac1d130fd4 481 /**
mbed_official 0:51ac1d130fd4 482 * Adam's mem_malloc() plus solution for bug #17922
mbed_official 0:51ac1d130fd4 483 * Allocate a block of memory with a minimum of 'size' bytes.
mbed_official 0:51ac1d130fd4 484 *
mbed_official 0:51ac1d130fd4 485 * @param size is the minimum size of the requested block in bytes.
mbed_official 0:51ac1d130fd4 486 * @return pointer to allocated memory or NULL if no free memory was found.
mbed_official 0:51ac1d130fd4 487 *
mbed_official 0:51ac1d130fd4 488 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
mbed_official 0:51ac1d130fd4 489 */
mbed_official 0:51ac1d130fd4 490 void *
mbed_official 0:51ac1d130fd4 491 mem_malloc(mem_size_t size)
mbed_official 0:51ac1d130fd4 492 {
mbed_official 0:51ac1d130fd4 493 mem_size_t ptr, ptr2;
mbed_official 0:51ac1d130fd4 494 struct mem *mem, *mem2;
mbed_official 0:51ac1d130fd4 495 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 496 u8_t local_mem_free_count = 0;
mbed_official 0:51ac1d130fd4 497 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 498 LWIP_MEM_ALLOC_DECL_PROTECT();
mbed_official 0:51ac1d130fd4 499
mbed_official 0:51ac1d130fd4 500 if (size == 0) {
mbed_official 0:51ac1d130fd4 501 return NULL;
mbed_official 0:51ac1d130fd4 502 }
mbed_official 0:51ac1d130fd4 503
mbed_official 0:51ac1d130fd4 504 /* Expand the size of the allocated memory region so that we can
mbed_official 0:51ac1d130fd4 505 adjust for alignment. */
mbed_official 0:51ac1d130fd4 506 size = LWIP_MEM_ALIGN_SIZE(size);
mbed_official 0:51ac1d130fd4 507
mbed_official 0:51ac1d130fd4 508 if(size < MIN_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 509 /* every data block must be at least MIN_SIZE_ALIGNED long */
mbed_official 0:51ac1d130fd4 510 size = MIN_SIZE_ALIGNED;
mbed_official 0:51ac1d130fd4 511 }
mbed_official 0:51ac1d130fd4 512
mbed_official 0:51ac1d130fd4 513 if (size > MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 514 return NULL;
mbed_official 0:51ac1d130fd4 515 }
mbed_official 0:51ac1d130fd4 516
mbed_official 0:51ac1d130fd4 517 /* protect the heap from concurrent access */
mbed_official 0:51ac1d130fd4 518 sys_mutex_lock(&mem_mutex);
mbed_official 0:51ac1d130fd4 519 LWIP_MEM_ALLOC_PROTECT();
mbed_official 0:51ac1d130fd4 520 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 521 /* run as long as a mem_free disturbed mem_malloc */
mbed_official 0:51ac1d130fd4 522 do {
mbed_official 0:51ac1d130fd4 523 local_mem_free_count = 0;
mbed_official 0:51ac1d130fd4 524 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 525
mbed_official 0:51ac1d130fd4 526 /* Scan through the heap searching for a free block that is big enough,
mbed_official 0:51ac1d130fd4 527 * beginning with the lowest free block.
mbed_official 0:51ac1d130fd4 528 */
mbed_official 0:51ac1d130fd4 529 for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
mbed_official 0:51ac1d130fd4 530 ptr = ((struct mem *)(void *)&ram[ptr])->next) {
mbed_official 0:51ac1d130fd4 531 mem = (struct mem *)(void *)&ram[ptr];
mbed_official 0:51ac1d130fd4 532 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 533 mem_free_count = 0;
mbed_official 0:51ac1d130fd4 534 LWIP_MEM_ALLOC_UNPROTECT();
mbed_official 0:51ac1d130fd4 535 /* allow mem_free to run */
mbed_official 0:51ac1d130fd4 536 LWIP_MEM_ALLOC_PROTECT();
mbed_official 0:51ac1d130fd4 537 if (mem_free_count != 0) {
mbed_official 0:51ac1d130fd4 538 local_mem_free_count = mem_free_count;
mbed_official 0:51ac1d130fd4 539 }
mbed_official 0:51ac1d130fd4 540 mem_free_count = 0;
mbed_official 0:51ac1d130fd4 541 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 542
mbed_official 0:51ac1d130fd4 543 if ((!mem->used) &&
mbed_official 0:51ac1d130fd4 544 (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
mbed_official 0:51ac1d130fd4 545 /* mem is not used and at least perfect fit is possible:
mbed_official 0:51ac1d130fd4 546 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
mbed_official 0:51ac1d130fd4 547
mbed_official 0:51ac1d130fd4 548 if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
mbed_official 0:51ac1d130fd4 549 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
mbed_official 0:51ac1d130fd4 550 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
mbed_official 0:51ac1d130fd4 551 * -> split large block, create empty remainder,
mbed_official 0:51ac1d130fd4 552 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
mbed_official 0:51ac1d130fd4 553 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
mbed_official 0:51ac1d130fd4 554 * struct mem would fit in but no data between mem2 and mem2->next
mbed_official 0:51ac1d130fd4 555 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
mbed_official 0:51ac1d130fd4 556 * region that couldn't hold data, but when mem->next gets freed,
mbed_official 0:51ac1d130fd4 557 * the 2 regions would be combined, resulting in more free memory
mbed_official 0:51ac1d130fd4 558 */
mbed_official 0:51ac1d130fd4 559 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
mbed_official 0:51ac1d130fd4 560 /* create mem2 struct */
mbed_official 0:51ac1d130fd4 561 mem2 = (struct mem *)(void *)&ram[ptr2];
mbed_official 0:51ac1d130fd4 562 mem2->used = 0;
mbed_official 0:51ac1d130fd4 563 mem2->next = mem->next;
mbed_official 0:51ac1d130fd4 564 mem2->prev = ptr;
mbed_official 0:51ac1d130fd4 565 /* and insert it between mem and mem->next */
mbed_official 0:51ac1d130fd4 566 mem->next = ptr2;
mbed_official 0:51ac1d130fd4 567 mem->used = 1;
mbed_official 0:51ac1d130fd4 568
mbed_official 0:51ac1d130fd4 569 if (mem2->next != MEM_SIZE_ALIGNED) {
mbed_official 0:51ac1d130fd4 570 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
mbed_official 0:51ac1d130fd4 571 }
mbed_official 0:51ac1d130fd4 572 MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
mbed_official 0:51ac1d130fd4 573 } else {
mbed_official 0:51ac1d130fd4 574 /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
mbed_official 0:51ac1d130fd4 575 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
mbed_official 0:51ac1d130fd4 576 * take care of this).
mbed_official 0:51ac1d130fd4 577 * -> near fit or excact fit: do not split, no mem2 creation
mbed_official 0:51ac1d130fd4 578 * also can't move mem->next directly behind mem, since mem->next
mbed_official 0:51ac1d130fd4 579 * will always be used at this point!
mbed_official 0:51ac1d130fd4 580 */
mbed_official 0:51ac1d130fd4 581 mem->used = 1;
mbed_official 0:51ac1d130fd4 582 MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
mbed_official 0:51ac1d130fd4 583 }
mbed_official 0:51ac1d130fd4 584
mbed_official 0:51ac1d130fd4 585 if (mem == lfree) {
mbed_official 0:51ac1d130fd4 586 /* Find next free block after mem and update lowest free pointer */
mbed_official 0:51ac1d130fd4 587 while (lfree->used && lfree != ram_end) {
mbed_official 0:51ac1d130fd4 588 LWIP_MEM_ALLOC_UNPROTECT();
mbed_official 0:51ac1d130fd4 589 /* prevent high interrupt latency... */
mbed_official 0:51ac1d130fd4 590 LWIP_MEM_ALLOC_PROTECT();
mbed_official 0:51ac1d130fd4 591 lfree = (struct mem *)(void *)&ram[lfree->next];
mbed_official 0:51ac1d130fd4 592 }
mbed_official 0:51ac1d130fd4 593 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
mbed_official 0:51ac1d130fd4 594 }
mbed_official 0:51ac1d130fd4 595 LWIP_MEM_ALLOC_UNPROTECT();
mbed_official 0:51ac1d130fd4 596 sys_mutex_unlock(&mem_mutex);
mbed_official 0:51ac1d130fd4 597 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
mbed_official 0:51ac1d130fd4 598 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
mbed_official 0:51ac1d130fd4 599 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
mbed_official 0:51ac1d130fd4 600 ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
mbed_official 0:51ac1d130fd4 601 LWIP_ASSERT("mem_malloc: sanity check alignment",
mbed_official 0:51ac1d130fd4 602 (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
mbed_official 0:51ac1d130fd4 603
mbed_official 0:51ac1d130fd4 604 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
mbed_official 0:51ac1d130fd4 605 }
mbed_official 0:51ac1d130fd4 606 }
mbed_official 0:51ac1d130fd4 607 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mbed_official 0:51ac1d130fd4 608 /* if we got interrupted by a mem_free, try again */
mbed_official 0:51ac1d130fd4 609 } while(local_mem_free_count != 0);
mbed_official 0:51ac1d130fd4 610 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
mbed_official 0:51ac1d130fd4 611 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
mbed_official 0:51ac1d130fd4 612 MEM_STATS_INC(err);
mbed_official 0:51ac1d130fd4 613 LWIP_MEM_ALLOC_UNPROTECT();
mbed_official 0:51ac1d130fd4 614 sys_mutex_unlock(&mem_mutex);
mbed_official 0:51ac1d130fd4 615 return NULL;
mbed_official 0:51ac1d130fd4 616 }
mbed_official 0:51ac1d130fd4 617
mbed_official 0:51ac1d130fd4 618 #endif /* MEM_USE_POOLS */
mbed_official 0:51ac1d130fd4 619 /**
mbed_official 0:51ac1d130fd4 620 * Contiguously allocates enough space for count objects that are size bytes
mbed_official 0:51ac1d130fd4 621 * of memory each and returns a pointer to the allocated memory.
mbed_official 0:51ac1d130fd4 622 *
mbed_official 0:51ac1d130fd4 623 * The allocated memory is filled with bytes of value zero.
mbed_official 0:51ac1d130fd4 624 *
mbed_official 0:51ac1d130fd4 625 * @param count number of objects to allocate
mbed_official 0:51ac1d130fd4 626 * @param size size of the objects to allocate
mbed_official 0:51ac1d130fd4 627 * @return pointer to allocated memory / NULL pointer if there is an error
mbed_official 0:51ac1d130fd4 628 */
mbed_official 0:51ac1d130fd4 629 void *mem_calloc(mem_size_t count, mem_size_t size)
mbed_official 0:51ac1d130fd4 630 {
mbed_official 0:51ac1d130fd4 631 void *p;
mbed_official 0:51ac1d130fd4 632
mbed_official 0:51ac1d130fd4 633 /* allocate 'count' objects of size 'size' */
mbed_official 0:51ac1d130fd4 634 p = mem_malloc(count * size);
mbed_official 0:51ac1d130fd4 635 if (p) {
mbed_official 0:51ac1d130fd4 636 /* zero the memory */
mbed_official 0:51ac1d130fd4 637 memset(p, 0, count * size);
mbed_official 0:51ac1d130fd4 638 }
mbed_official 0:51ac1d130fd4 639 return p;
mbed_official 0:51ac1d130fd4 640 }
mbed_official 0:51ac1d130fd4 641
mbed_official 0:51ac1d130fd4 642 #endif /* !MEM_LIBC_MALLOC */