My fork of the HTTPServer (working)

Dependents:   DGWWebServer LAN2

Committer:
screamer
Date:
Tue Nov 20 12:18:53 2012 +0000
Revision:
1:284f2df30cf9
Parent:
0:7a64fbb4069d
local changes

Who changed what in which revision?

User | Revision | Line number | New contents of line
screamer 0:7a64fbb4069d 1 /**
screamer 0:7a64fbb4069d 2 * @file
screamer 0:7a64fbb4069d 3 * Dynamic memory manager
screamer 0:7a64fbb4069d 4 *
screamer 0:7a64fbb4069d 5 * This is a lightweight replacement for the standard C library malloc().
screamer 0:7a64fbb4069d 6 *
screamer 0:7a64fbb4069d 7 * If you want to use the standard C library malloc() instead, define
screamer 0:7a64fbb4069d 8 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
screamer 0:7a64fbb4069d 9 *
screamer 0:7a64fbb4069d 10 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
screamer 0:7a64fbb4069d 11 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
screamer 0:7a64fbb4069d 12 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
screamer 0:7a64fbb4069d 13 * of pools like this (more pools can be added between _START and _END):
screamer 0:7a64fbb4069d 14 *
screamer 0:7a64fbb4069d 15 * Define three pools with sizes 256, 512, and 1512 bytes
screamer 0:7a64fbb4069d 16 * LWIP_MALLOC_MEMPOOL_START
screamer 0:7a64fbb4069d 17 * LWIP_MALLOC_MEMPOOL(20, 256)
screamer 0:7a64fbb4069d 18 * LWIP_MALLOC_MEMPOOL(10, 512)
screamer 0:7a64fbb4069d 19 * LWIP_MALLOC_MEMPOOL(5, 1512)
screamer 0:7a64fbb4069d 20 * LWIP_MALLOC_MEMPOOL_END
screamer 0:7a64fbb4069d 21 */
screamer 0:7a64fbb4069d 22
screamer 0:7a64fbb4069d 23 /*
screamer 0:7a64fbb4069d 24 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
screamer 0:7a64fbb4069d 25 * All rights reserved.
screamer 0:7a64fbb4069d 26 *
screamer 0:7a64fbb4069d 27 * Redistribution and use in source and binary forms, with or without modification,
screamer 0:7a64fbb4069d 28 * are permitted provided that the following conditions are met:
screamer 0:7a64fbb4069d 29 *
screamer 0:7a64fbb4069d 30 * 1. Redistributions of source code must retain the above copyright notice,
screamer 0:7a64fbb4069d 31 * this list of conditions and the following disclaimer.
screamer 0:7a64fbb4069d 32 * 2. Redistributions in binary form must reproduce the above copyright notice,
screamer 0:7a64fbb4069d 33 * this list of conditions and the following disclaimer in the documentation
screamer 0:7a64fbb4069d 34 * and/or other materials provided with the distribution.
screamer 0:7a64fbb4069d 35 * 3. The name of the author may not be used to endorse or promote products
screamer 0:7a64fbb4069d 36 * derived from this software without specific prior written permission.
screamer 0:7a64fbb4069d 37 *
screamer 0:7a64fbb4069d 38 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
screamer 0:7a64fbb4069d 39 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
screamer 0:7a64fbb4069d 40 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
screamer 0:7a64fbb4069d 41 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
screamer 0:7a64fbb4069d 42 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
screamer 0:7a64fbb4069d 43 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
screamer 0:7a64fbb4069d 44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
screamer 0:7a64fbb4069d 45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
screamer 0:7a64fbb4069d 46 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
screamer 0:7a64fbb4069d 47 * OF SUCH DAMAGE.
screamer 0:7a64fbb4069d 48 *
screamer 0:7a64fbb4069d 49 * This file is part of the lwIP TCP/IP stack.
screamer 0:7a64fbb4069d 50 *
screamer 0:7a64fbb4069d 51 * Author: Adam Dunkels <adam@sics.se>
screamer 0:7a64fbb4069d 52 * Simon Goldschmidt
screamer 0:7a64fbb4069d 53 *
screamer 0:7a64fbb4069d 54 */
screamer 0:7a64fbb4069d 55
screamer 0:7a64fbb4069d 56 #include "lwip/opt.h"
screamer 0:7a64fbb4069d 57
screamer 0:7a64fbb4069d 58 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
screamer 0:7a64fbb4069d 59
screamer 0:7a64fbb4069d 60 #include "lwip/def.h"
screamer 0:7a64fbb4069d 61 #include "lwip/mem.h"
screamer 0:7a64fbb4069d 62 #include "lwip/sys.h"
screamer 0:7a64fbb4069d 63 #include "lwip/stats.h"
screamer 0:7a64fbb4069d 64
screamer 0:7a64fbb4069d 65 #include <string.h>
screamer 0:7a64fbb4069d 66
screamer 0:7a64fbb4069d 67 #if MEM_USE_POOLS
screamer 0:7a64fbb4069d 68 /* lwIP head implemented with different sized pools */
screamer 0:7a64fbb4069d 69
screamer 0:7a64fbb4069d 70 /**
screamer 0:7a64fbb4069d 71 * Allocate memory: determine the smallest pool that is big enough
screamer 0:7a64fbb4069d 72 * to contain an element of 'size' and get an element from that pool.
screamer 0:7a64fbb4069d 73 *
screamer 0:7a64fbb4069d 74 * @param size the size in bytes of the memory needed
screamer 0:7a64fbb4069d 75 * @return a pointer to the allocated memory or NULL if the pool is empty
screamer 0:7a64fbb4069d 76 */
screamer 0:7a64fbb4069d 77 void *
screamer 0:7a64fbb4069d 78 mem_malloc(mem_size_t size)
screamer 0:7a64fbb4069d 79 {
screamer 0:7a64fbb4069d 80 struct memp_malloc_helper *element;
screamer 0:7a64fbb4069d 81 memp_t poolnr;
screamer 0:7a64fbb4069d 82 mem_size_t required_size = size + sizeof(struct memp_malloc_helper);
screamer 0:7a64fbb4069d 83
screamer 0:7a64fbb4069d 84 for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr++) {
screamer 0:7a64fbb4069d 85 #if MEM_USE_POOLS_TRY_BIGGER_POOL
screamer 0:7a64fbb4069d 86 again:
screamer 0:7a64fbb4069d 87 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
screamer 0:7a64fbb4069d 88 /* is this pool big enough to hold an element of the required size
screamer 0:7a64fbb4069d 89 plus a struct memp_malloc_helper that saves the pool this element came from? */
screamer 0:7a64fbb4069d 90 if (required_size <= memp_sizes[poolnr]) {
screamer 0:7a64fbb4069d 91 break;
screamer 0:7a64fbb4069d 92 }
screamer 0:7a64fbb4069d 93 }
screamer 0:7a64fbb4069d 94 if (poolnr > MEMP_POOL_LAST) {
screamer 0:7a64fbb4069d 95 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
screamer 0:7a64fbb4069d 96 return NULL;
screamer 0:7a64fbb4069d 97 }
screamer 0:7a64fbb4069d 98 element = (struct memp_malloc_helper*)memp_malloc(poolnr);
screamer 0:7a64fbb4069d 99 if (element == NULL) {
screamer 0:7a64fbb4069d 100 /* No need to DEBUGF or ASSERT: This error is already
screamer 0:7a64fbb4069d 101 taken care of in memp.c */
screamer 0:7a64fbb4069d 102 #if MEM_USE_POOLS_TRY_BIGGER_POOL
screamer 0:7a64fbb4069d 103 /** Try a bigger pool if this one is empty! */
screamer 0:7a64fbb4069d 104 if (poolnr < MEMP_POOL_LAST) {
screamer 0:7a64fbb4069d 105 poolnr++;
screamer 0:7a64fbb4069d 106 goto again;
screamer 0:7a64fbb4069d 107 }
screamer 0:7a64fbb4069d 108 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
screamer 0:7a64fbb4069d 109 return NULL;
screamer 0:7a64fbb4069d 110 }
screamer 0:7a64fbb4069d 111
screamer 0:7a64fbb4069d 112 /* save the pool number this element came from */
screamer 0:7a64fbb4069d 113 element->poolnr = poolnr;
screamer 0:7a64fbb4069d 114 /* and return a pointer to the memory directly after the struct memp_malloc_helper */
screamer 0:7a64fbb4069d 115 element++;
screamer 0:7a64fbb4069d 116
screamer 0:7a64fbb4069d 117 return element;
screamer 0:7a64fbb4069d 118 }
screamer 0:7a64fbb4069d 119
screamer 0:7a64fbb4069d 120 /**
screamer 0:7a64fbb4069d 121 * Free memory previously allocated by mem_malloc. Loads the pool number
screamer 0:7a64fbb4069d 122 * and calls memp_free with that pool number to put the element back into
screamer 0:7a64fbb4069d 123 * its pool
screamer 0:7a64fbb4069d 124 *
screamer 0:7a64fbb4069d 125 * @param rmem the memory element to free
screamer 0:7a64fbb4069d 126 */
screamer 0:7a64fbb4069d 127 void
screamer 0:7a64fbb4069d 128 mem_free(void *rmem)
screamer 0:7a64fbb4069d 129 {
screamer 0:7a64fbb4069d 130 struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;
screamer 0:7a64fbb4069d 131
screamer 0:7a64fbb4069d 132 LWIP_ASSERT("rmem != NULL", (rmem != NULL));
screamer 0:7a64fbb4069d 133 LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
screamer 0:7a64fbb4069d 134
screamer 0:7a64fbb4069d 135 /* get the original struct memp_malloc_helper */
screamer 0:7a64fbb4069d 136 hmem--;
screamer 0:7a64fbb4069d 137
screamer 0:7a64fbb4069d 138 LWIP_ASSERT("hmem != NULL", (hmem != NULL));
screamer 0:7a64fbb4069d 139 LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
screamer 0:7a64fbb4069d 140 LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
screamer 0:7a64fbb4069d 141
screamer 0:7a64fbb4069d 142 /* and put it in the pool we saved earlier */
screamer 0:7a64fbb4069d 143 memp_free(hmem->poolnr, hmem);
screamer 0:7a64fbb4069d 144 }
screamer 0:7a64fbb4069d 145
screamer 0:7a64fbb4069d 146 #else /* MEM_USE_POOLS */
screamer 0:7a64fbb4069d 147 /* lwIP replacement for your libc malloc() */
screamer 0:7a64fbb4069d 148
/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
screamer 0:7a64fbb4069d 162
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent too small blocks to fragment the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/* MEM_POSITION lets a port place the heap array in a specific memory
 * section (e.g. via a compiler attribute); it expands to nothing by default. */
#ifndef MEM_POSITION
#define MEM_POSITION
#endif
screamer 0:7a64fbb4069d 177
/** the heap. we need one struct mem at the end and some room for alignment */
static u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT] MEM_POSITION;
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
//static sys_sem_t mem_sem;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

/* set by mem_free()/mem_realloc() so that an interrupted mem_malloc() can
 * detect the heap changed underneath it and restart its search */
static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
/* NOTE(review): the mem_sem declaration above is commented out in this fork,
 * yet these macros still reference it — verify the port supplies mem_sem or
 * that heap locking is intentionally disabled. */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_arch_sem_wait(mem_sem, 0)
#define LWIP_MEM_FREE_UNPROTECT()  sys_sem_signal(mem_sem)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
screamer 0:7a64fbb4069d 215
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_realloc()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      /* the merged-away block was the lowest free one; mem takes its place */
      lfree = mem;
    }
    mem->next = nmem->next;
    /* relink the block after nmem back to mem; prev/next are byte indices
       into ram, not pointers */
    ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram;
  }

  /* plug hole backward */
  pmem = (struct mem *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram;
  }
}
screamer 0:7a64fbb4069d 261
screamer 0:7a64fbb4069d 262 /**
screamer 0:7a64fbb4069d 263 * Zero the heap and initialize start, end and lowest-free
screamer 0:7a64fbb4069d 264 */
screamer 0:7a64fbb4069d 265 void
screamer 0:7a64fbb4069d 266 mem_init(void)
screamer 0:7a64fbb4069d 267 {
screamer 0:7a64fbb4069d 268 struct mem *mem;
screamer 0:7a64fbb4069d 269
screamer 0:7a64fbb4069d 270 LWIP_ASSERT("Sanity check alignment",
screamer 0:7a64fbb4069d 271 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
screamer 0:7a64fbb4069d 272
screamer 0:7a64fbb4069d 273 /* align the heap */
screamer 0:7a64fbb4069d 274 ram = LWIP_MEM_ALIGN(ram_heap);
screamer 0:7a64fbb4069d 275 /* initialize the start of the heap */
screamer 0:7a64fbb4069d 276 mem = (struct mem *)ram;
screamer 0:7a64fbb4069d 277 mem->next = MEM_SIZE_ALIGNED;
screamer 0:7a64fbb4069d 278 mem->prev = 0;
screamer 0:7a64fbb4069d 279 mem->used = 0;
screamer 0:7a64fbb4069d 280 /* initialize the end of the heap */
screamer 0:7a64fbb4069d 281 ram_end = (struct mem *)&ram[MEM_SIZE_ALIGNED];
screamer 0:7a64fbb4069d 282 ram_end->used = 1;
screamer 0:7a64fbb4069d 283 ram_end->next = MEM_SIZE_ALIGNED;
screamer 0:7a64fbb4069d 284 ram_end->prev = MEM_SIZE_ALIGNED;
screamer 0:7a64fbb4069d 285
screamer 0:7a64fbb4069d 286 //mem_sem = sys_sem_new(1);
screamer 0:7a64fbb4069d 287
screamer 0:7a64fbb4069d 288 /* initialize the lowest-free pointer to the start of the heap */
screamer 0:7a64fbb4069d 289 lfree = (struct mem *)ram;
screamer 0:7a64fbb4069d 290
screamer 0:7a64fbb4069d 291 MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
screamer 0:7a64fbb4069d 292 }
screamer 0:7a64fbb4069d 293
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    /* freeing NULL is tolerated, matching libc free() semantics */
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* even with asserts disabled, reject pointers outside the heap instead of
     corrupting it */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem: its header sits SIZEOF_STRUCT_MEM
     bytes in front of the user data ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  /* mem->next is a byte index into ram, so the difference is this block's
     total size including its header */
  MEM_STATS_DEC_USED(used, mem->next - ((u8_t *)mem - ram));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* signal a concurrently running mem_malloc() that the heap changed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
screamer 0:7a64fbb4069d 347
/**
 * In contrast to its name, mem_realloc can only shrink memory, not expand it.
 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
 * this shouldn't be a problem!
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_realloc(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* even with asserts disabled, reject pointers outside the heap */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer (byte index into ram) */
  ptr = (u8_t *)mem - ram;

  /* current user-data size of this block */
  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  MEM_STATS_DEC_USED(used, (size - newsize));

  mem2 = (struct mem *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little:
       the freed tail of this block merges with the following free block */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      /* lfree moves along with the relocated free header */
      lfree = (struct mem *)&ram[ptr2];
    }
    mem2 = (struct mem *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* signal a concurrently running mem_malloc() that the heap changed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
screamer 0:7a64fbb4069d 475
screamer 0:7a64fbb4069d 476 /**
screamer 0:7a64fbb4069d 477 * Adam's mem_malloc() plus solution for bug #17922
screamer 0:7a64fbb4069d 478 * Allocate a block of memory with a minimum of 'size' bytes.
screamer 0:7a64fbb4069d 479 *
screamer 0:7a64fbb4069d 480 * @param size is the minimum size of the requested block in bytes.
screamer 0:7a64fbb4069d 481 * @return pointer to allocated memory or NULL if no free memory was found.
screamer 0:7a64fbb4069d 482 *
screamer 0:7a64fbb4069d 483 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
screamer 0:7a64fbb4069d 484 */
screamer 0:7a64fbb4069d 485 void *
screamer 0:7a64fbb4069d 486 mem_malloc(mem_size_t size)
screamer 0:7a64fbb4069d 487 {
screamer 0:7a64fbb4069d 488 mem_size_t ptr, ptr2;
screamer 0:7a64fbb4069d 489 struct mem *mem, *mem2;
screamer 0:7a64fbb4069d 490 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
screamer 0:7a64fbb4069d 491 u8_t local_mem_free_count = 0;
screamer 0:7a64fbb4069d 492 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
screamer 0:7a64fbb4069d 493 LWIP_MEM_ALLOC_DECL_PROTECT();
screamer 0:7a64fbb4069d 494
screamer 0:7a64fbb4069d 495 if (size == 0) {
screamer 0:7a64fbb4069d 496 return NULL;
screamer 0:7a64fbb4069d 497 }
screamer 0:7a64fbb4069d 498
screamer 0:7a64fbb4069d 499 /* Expand the size of the allocated memory region so that we can
screamer 0:7a64fbb4069d 500 adjust for alignment. */
screamer 0:7a64fbb4069d 501 size = LWIP_MEM_ALIGN_SIZE(size);
screamer 0:7a64fbb4069d 502
screamer 0:7a64fbb4069d 503 if(size < MIN_SIZE_ALIGNED) {
screamer 0:7a64fbb4069d 504 /* every data block must be at least MIN_SIZE_ALIGNED long */
screamer 0:7a64fbb4069d 505 size = MIN_SIZE_ALIGNED;
screamer 0:7a64fbb4069d 506 }
screamer 0:7a64fbb4069d 507
screamer 0:7a64fbb4069d 508 if (size > MEM_SIZE_ALIGNED) {
screamer 0:7a64fbb4069d 509 return NULL;
screamer 0:7a64fbb4069d 510 }
screamer 0:7a64fbb4069d 511
screamer 0:7a64fbb4069d 512 /* protect the heap from concurrent access */
screamer 0:7a64fbb4069d 513 sys_arch_sem_wait(mem_sem, 0);
screamer 0:7a64fbb4069d 514 LWIP_MEM_ALLOC_PROTECT();
screamer 0:7a64fbb4069d 515 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
screamer 0:7a64fbb4069d 516 /* run as long as a mem_free disturbed mem_malloc */
screamer 0:7a64fbb4069d 517 do {
screamer 0:7a64fbb4069d 518 local_mem_free_count = 0;
screamer 0:7a64fbb4069d 519 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
screamer 0:7a64fbb4069d 520
screamer 0:7a64fbb4069d 521 /* Scan through the heap searching for a free block that is big enough,
screamer 0:7a64fbb4069d 522 * beginning with the lowest free block.
screamer 0:7a64fbb4069d 523 */
screamer 0:7a64fbb4069d 524 for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
screamer 0:7a64fbb4069d 525 ptr = ((struct mem *)&ram[ptr])->next) {
screamer 0:7a64fbb4069d 526 mem = (struct mem *)&ram[ptr];
screamer 0:7a64fbb4069d 527 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
screamer 0:7a64fbb4069d 528 mem_free_count = 0;
screamer 0:7a64fbb4069d 529 LWIP_MEM_ALLOC_UNPROTECT();
screamer 0:7a64fbb4069d 530 /* allow mem_free to run */
screamer 0:7a64fbb4069d 531 LWIP_MEM_ALLOC_PROTECT();
screamer 0:7a64fbb4069d 532 if (mem_free_count != 0) {
screamer 0:7a64fbb4069d 533 local_mem_free_count = mem_free_count;
screamer 0:7a64fbb4069d 534 }
screamer 0:7a64fbb4069d 535 mem_free_count = 0;
screamer 0:7a64fbb4069d 536 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
screamer 0:7a64fbb4069d 537
screamer 0:7a64fbb4069d 538 if ((!mem->used) &&
screamer 0:7a64fbb4069d 539 (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
screamer 0:7a64fbb4069d 540 /* mem is not used and at least perfect fit is possible:
screamer 0:7a64fbb4069d 541 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
screamer 0:7a64fbb4069d 542
screamer 0:7a64fbb4069d 543 if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
screamer 0:7a64fbb4069d 544 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
screamer 0:7a64fbb4069d 545 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
screamer 0:7a64fbb4069d 546 * -> split large block, create empty remainder,
screamer 0:7a64fbb4069d 547 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
screamer 0:7a64fbb4069d 548 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
screamer 0:7a64fbb4069d 549 * struct mem would fit in but no data between mem2 and mem2->next
screamer 0:7a64fbb4069d 550 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
screamer 0:7a64fbb4069d 551 * region that couldn't hold data, but when mem->next gets freed,
screamer 0:7a64fbb4069d 552 * the 2 regions would be combined, resulting in more free memory
screamer 0:7a64fbb4069d 553 */
screamer 0:7a64fbb4069d 554 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
screamer 0:7a64fbb4069d 555 /* create mem2 struct */
screamer 0:7a64fbb4069d 556 mem2 = (struct mem *)&ram[ptr2];
screamer 0:7a64fbb4069d 557 mem2->used = 0;
screamer 0:7a64fbb4069d 558 mem2->next = mem->next;
screamer 0:7a64fbb4069d 559 mem2->prev = ptr;
screamer 0:7a64fbb4069d 560 /* and insert it between mem and mem->next */
screamer 0:7a64fbb4069d 561 mem->next = ptr2;
screamer 0:7a64fbb4069d 562 mem->used = 1;
screamer 0:7a64fbb4069d 563
screamer 0:7a64fbb4069d 564 if (mem2->next != MEM_SIZE_ALIGNED) {
screamer 0:7a64fbb4069d 565 ((struct mem *)&ram[mem2->next])->prev = ptr2;
screamer 0:7a64fbb4069d 566 }
screamer 0:7a64fbb4069d 567 MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
screamer 0:7a64fbb4069d 568 } else {
screamer 0:7a64fbb4069d 569           /* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
screamer 0:7a64fbb4069d 570            * be used at this point: if not, we would have 2 unused structs in a row; plug_holes should have
screamer 0:7a64fbb4069d 571            * taken care of this).
screamer 0:7a64fbb4069d 572            * -> near fit or exact fit: do not split, no mem2 creation
screamer 0:7a64fbb4069d 573 * also can't move mem->next directly behind mem, since mem->next
screamer 0:7a64fbb4069d 574 * will always be used at this point!
screamer 0:7a64fbb4069d 575 */
screamer 0:7a64fbb4069d 576 mem->used = 1;
screamer 0:7a64fbb4069d 577 MEM_STATS_INC_USED(used, mem->next - ((u8_t *)mem - ram));
screamer 0:7a64fbb4069d 578 }
screamer 0:7a64fbb4069d 579
screamer 0:7a64fbb4069d 580 if (mem == lfree) {
screamer 0:7a64fbb4069d 581 /* Find next free block after mem and update lowest free pointer */
screamer 0:7a64fbb4069d 582 while (lfree->used && lfree != ram_end) {
screamer 0:7a64fbb4069d 583 LWIP_MEM_ALLOC_UNPROTECT();
screamer 0:7a64fbb4069d 584 /* prevent high interrupt latency... */
screamer 0:7a64fbb4069d 585 LWIP_MEM_ALLOC_PROTECT();
screamer 0:7a64fbb4069d 586 lfree = (struct mem *)&ram[lfree->next];
screamer 0:7a64fbb4069d 587 }
screamer 0:7a64fbb4069d 588 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
screamer 0:7a64fbb4069d 589 }
screamer 0:7a64fbb4069d 590 LWIP_MEM_ALLOC_UNPROTECT();
screamer 0:7a64fbb4069d 591 sys_sem_signal(mem_sem);
screamer 0:7a64fbb4069d 592 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
screamer 0:7a64fbb4069d 593 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
screamer 0:7a64fbb4069d 594 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
screamer 0:7a64fbb4069d 595 ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
screamer 0:7a64fbb4069d 596 LWIP_ASSERT("mem_malloc: sanity check alignment",
screamer 0:7a64fbb4069d 597 (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
screamer 0:7a64fbb4069d 598
screamer 0:7a64fbb4069d 599 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
screamer 0:7a64fbb4069d 600 }
screamer 0:7a64fbb4069d 601 }
screamer 0:7a64fbb4069d 602 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
screamer 0:7a64fbb4069d 603 /* if we got interrupted by a mem_free, try again */
screamer 0:7a64fbb4069d 604 } while(local_mem_free_count != 0);
screamer 0:7a64fbb4069d 605 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
screamer 0:7a64fbb4069d 606 LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
screamer 0:7a64fbb4069d 607 MEM_STATS_INC(err);
screamer 0:7a64fbb4069d 608 LWIP_MEM_ALLOC_UNPROTECT();
screamer 0:7a64fbb4069d 609 sys_sem_signal(mem_sem);
screamer 0:7a64fbb4069d 610 return NULL;
screamer 0:7a64fbb4069d 611 }
screamer 0:7a64fbb4069d 612
screamer 0:7a64fbb4069d 613 #endif /* MEM_USE_POOLS */
screamer 0:7a64fbb4069d 614 /**
screamer 0:7a64fbb4069d 615 * Contiguously allocates enough space for count objects that are size bytes
screamer 0:7a64fbb4069d 616 * of memory each and returns a pointer to the allocated memory.
screamer 0:7a64fbb4069d 617 *
screamer 0:7a64fbb4069d 618 * The allocated memory is filled with bytes of value zero.
screamer 0:7a64fbb4069d 619 *
screamer 0:7a64fbb4069d 620 * @param count number of objects to allocate
screamer 0:7a64fbb4069d 621 * @param size size of the objects to allocate
screamer 0:7a64fbb4069d 622 * @return pointer to allocated memory / NULL pointer if there is an error
screamer 0:7a64fbb4069d 623 */
screamer 0:7a64fbb4069d 624 void *mem_calloc(mem_size_t count, mem_size_t size)
screamer 0:7a64fbb4069d 625 {
screamer 0:7a64fbb4069d 626 void *p;
screamer 0:7a64fbb4069d 627
screamer 0:7a64fbb4069d 628 /* allocate 'count' objects of size 'size' */
screamer 0:7a64fbb4069d 629 p = mem_malloc(count * size);
screamer 0:7a64fbb4069d 630 if (p) {
screamer 0:7a64fbb4069d 631 /* zero the memory */
screamer 0:7a64fbb4069d 632 memset(p, 0, count * size);
screamer 0:7a64fbb4069d 633 }
screamer 0:7a64fbb4069d 634 return p;
screamer 0:7a64fbb4069d 635 }
screamer 0:7a64fbb4069d 636 #endif /* !MEM_LIBC_MALLOC */