Example program with HTTPServer and sensor data streaming over TCPSockets, using Donatien Garnier's Net APIs and services code on top of LWIP. Files StreamServer.h and .cpp encapsulate streaming over TCPSockets. Broadcast is done by sendToAll(), and all incoming data is echoed back to the client. The echo code can be replaced with some remote control of the streaming interface. See main(), which shows how to periodically send some data to all subscribed clients. To subscribe, a client should open a socket at <mbed_ip> port 123. I used a few lines of TCL code to set up a quick sink for the data. HTTP files are served on port 80 concurrently with the streaming.

Dependencies:   mbed

Committer:
iva2k
Date:
Mon Jun 14 03:24:33 2010 +0000
Revision:
1:3ee499525aa5
Parent:
0:e614f7875b60

        

Who changed what in which revision?

User | Revision | Line number | New contents of line
iva2k 0:e614f7875b60 1 /**
iva2k 0:e614f7875b60 2 * @file
iva2k 0:e614f7875b60 3 * Dynamic memory manager
iva2k 0:e614f7875b60 4 *
iva2k 0:e614f7875b60 5 * This is a lightweight replacement for the standard C library malloc().
iva2k 0:e614f7875b60 6 *
iva2k 0:e614f7875b60 7 * If you want to use the standard C library malloc() instead, define
iva2k 0:e614f7875b60 8 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
iva2k 0:e614f7875b60 9 *
iva2k 0:e614f7875b60 10 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
iva2k 0:e614f7875b60 11 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
iva2k 0:e614f7875b60 12 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
iva2k 0:e614f7875b60 13 * of pools like this (more pools can be added between _START and _END):
iva2k 0:e614f7875b60 14 *
iva2k 0:e614f7875b60 15 * Define three pools with sizes 256, 512, and 1512 bytes
iva2k 0:e614f7875b60 16 * LWIP_MALLOC_MEMPOOL_START
iva2k 0:e614f7875b60 17 * LWIP_MALLOC_MEMPOOL(20, 256)
iva2k 0:e614f7875b60 18 * LWIP_MALLOC_MEMPOOL(10, 512)
iva2k 0:e614f7875b60 19 * LWIP_MALLOC_MEMPOOL(5, 1512)
iva2k 0:e614f7875b60 20 * LWIP_MALLOC_MEMPOOL_END
iva2k 0:e614f7875b60 21 */
iva2k 0:e614f7875b60 22
iva2k 0:e614f7875b60 23 /*
iva2k 0:e614f7875b60 24 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
iva2k 0:e614f7875b60 25 * All rights reserved.
iva2k 0:e614f7875b60 26 *
iva2k 0:e614f7875b60 27 * Redistribution and use in source and binary forms, with or without modification,
iva2k 0:e614f7875b60 28 * are permitted provided that the following conditions are met:
iva2k 0:e614f7875b60 29 *
iva2k 0:e614f7875b60 30 * 1. Redistributions of source code must retain the above copyright notice,
iva2k 0:e614f7875b60 31 * this list of conditions and the following disclaimer.
iva2k 0:e614f7875b60 32 * 2. Redistributions in binary form must reproduce the above copyright notice,
iva2k 0:e614f7875b60 33 * this list of conditions and the following disclaimer in the documentation
iva2k 0:e614f7875b60 34 * and/or other materials provided with the distribution.
iva2k 0:e614f7875b60 35 * 3. The name of the author may not be used to endorse or promote products
iva2k 0:e614f7875b60 36 * derived from this software without specific prior written permission.
iva2k 0:e614f7875b60 37 *
iva2k 0:e614f7875b60 38 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
iva2k 0:e614f7875b60 39 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
iva2k 0:e614f7875b60 40 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
iva2k 0:e614f7875b60 41 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
iva2k 0:e614f7875b60 42 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
iva2k 0:e614f7875b60 43 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
iva2k 0:e614f7875b60 44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
iva2k 0:e614f7875b60 45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
iva2k 0:e614f7875b60 46 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
iva2k 0:e614f7875b60 47 * OF SUCH DAMAGE.
iva2k 0:e614f7875b60 48 *
iva2k 0:e614f7875b60 49 * This file is part of the lwIP TCP/IP stack.
iva2k 0:e614f7875b60 50 *
iva2k 0:e614f7875b60 51 * Author: Adam Dunkels <adam@sics.se>
iva2k 0:e614f7875b60 52 * Simon Goldschmidt
iva2k 0:e614f7875b60 53 *
iva2k 0:e614f7875b60 54 */
iva2k 0:e614f7875b60 55
iva2k 0:e614f7875b60 56 #include "lwip/opt.h"
iva2k 0:e614f7875b60 57
iva2k 0:e614f7875b60 58 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
iva2k 0:e614f7875b60 59
iva2k 0:e614f7875b60 60 #include "lwip/def.h"
iva2k 0:e614f7875b60 61 #include "lwip/mem.h"
iva2k 0:e614f7875b60 62 #include "lwip/sys.h"
iva2k 0:e614f7875b60 63 #include "lwip/stats.h"
iva2k 0:e614f7875b60 64 #include "lwip/err.h"
iva2k 0:e614f7875b60 65
iva2k 0:e614f7875b60 66 #include <string.h>
iva2k 0:e614f7875b60 67
iva2k 0:e614f7875b60 68 #if MEM_USE_POOLS
iva2k 0:e614f7875b60 69 /* lwIP head implemented with different sized pools */
iva2k 0:e614f7875b60 70
iva2k 0:e614f7875b60 71 /**
iva2k 0:e614f7875b60 72 * Allocate memory: determine the smallest pool that is big enough
iva2k 0:e614f7875b60 73 * to contain an element of 'size' and get an element from that pool.
iva2k 0:e614f7875b60 74 *
iva2k 0:e614f7875b60 75 * @param size the size in bytes of the memory needed
iva2k 0:e614f7875b60 76 * @return a pointer to the allocated memory or NULL if the pool is empty
iva2k 0:e614f7875b60 77 */
iva2k 0:e614f7875b60 78 void *
iva2k 0:e614f7875b60 79 mem_malloc(mem_size_t size)
iva2k 0:e614f7875b60 80 {
iva2k 0:e614f7875b60 81 struct memp_malloc_helper *element;
iva2k 0:e614f7875b60 82 memp_t poolnr;
iva2k 0:e614f7875b60 83 mem_size_t required_size = size + sizeof(struct memp_malloc_helper);
iva2k 0:e614f7875b60 84
iva2k 0:e614f7875b60 85 for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr++) {
iva2k 0:e614f7875b60 86 #if MEM_USE_POOLS_TRY_BIGGER_POOL
iva2k 0:e614f7875b60 87 again:
iva2k 0:e614f7875b60 88 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
iva2k 0:e614f7875b60 89 /* is this pool big enough to hold an element of the required size
iva2k 0:e614f7875b60 90 plus a struct memp_malloc_helper that saves the pool this element came from? */
iva2k 0:e614f7875b60 91 if (required_size <= memp_sizes[poolnr]) {
iva2k 0:e614f7875b60 92 break;
iva2k 0:e614f7875b60 93 }
iva2k 0:e614f7875b60 94 }
iva2k 0:e614f7875b60 95 if (poolnr > MEMP_POOL_LAST) {
iva2k 0:e614f7875b60 96 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
iva2k 0:e614f7875b60 97 return NULL;
iva2k 0:e614f7875b60 98 }
iva2k 0:e614f7875b60 99 element = (struct memp_malloc_helper*)memp_malloc(poolnr);
iva2k 0:e614f7875b60 100 if (element == NULL) {
iva2k 0:e614f7875b60 101 /* No need to DEBUGF or ASSERT: This error is already
iva2k 0:e614f7875b60 102 taken care of in memp.c */
iva2k 0:e614f7875b60 103 #if MEM_USE_POOLS_TRY_BIGGER_POOL
iva2k 0:e614f7875b60 104 /** Try a bigger pool if this one is empty! */
iva2k 0:e614f7875b60 105 if (poolnr < MEMP_POOL_LAST) {
iva2k 0:e614f7875b60 106 poolnr++;
iva2k 0:e614f7875b60 107 goto again;
iva2k 0:e614f7875b60 108 }
iva2k 0:e614f7875b60 109 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
iva2k 0:e614f7875b60 110 return NULL;
iva2k 0:e614f7875b60 111 }
iva2k 0:e614f7875b60 112
iva2k 0:e614f7875b60 113 /* save the pool number this element came from */
iva2k 0:e614f7875b60 114 element->poolnr = poolnr;
iva2k 0:e614f7875b60 115 /* and return a pointer to the memory directly after the struct memp_malloc_helper */
iva2k 0:e614f7875b60 116 element++;
iva2k 0:e614f7875b60 117
iva2k 0:e614f7875b60 118 return element;
iva2k 0:e614f7875b60 119 }
iva2k 0:e614f7875b60 120
/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool.
 *
 * @param rmem the memory element to free (points to the data area directly
 *             behind the hidden struct memp_malloc_helper header written
 *             by mem_malloc())
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* step back to the original struct memp_malloc_helper that precedes
     the user data (mem_malloc() returned helper + 1) */
  hmem--;

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* return the element to the pool recorded at allocation time */
  memp_free(hmem->poolnr, hmem);
}
iva2k 0:e614f7875b60 146
iva2k 0:e614f7875b60 147 #else /* MEM_USE_POOLS */
iva2k 0:e614f7875b60 148 /* lwIP replacement for your libc malloc() */
iva2k 0:e614f7875b60 149
/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 *
 * Blocks are linked by byte offsets into the 'ram' array rather than by
 * pointers, which keeps the links valid regardless of where the heap lives.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
iva2k 0:e614f7875b60 163
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent too small blocks to fragment the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE 12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT] MEM_POSITION;
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

#if (NO_SYS==0) //Pointless if monothreaded app
/** concurrent access protection */
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

/* set to 1 by mem_free()/mem_trim() so a concurrently running mem_malloc()
   knows the heap changed under it and must rescan */
static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
iva2k 0:e614f7875b60 222
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      /* the absorbed block was the lowest free block; 'mem' takes its place */
      lfree = mem;
    }
    mem->next = nmem->next;
    /* relink the block after the absorbed one back to 'mem' (links are
       byte offsets into ram, not pointers) */
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    /* relink the following block's back-pointer to the enlarged 'pmem' */
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}
iva2k 0:e614f7875b60 268
iva2k 0:e614f7875b60 269 /**
iva2k 0:e614f7875b60 270 * Zero the heap and initialize start, end and lowest-free
iva2k 0:e614f7875b60 271 */
iva2k 0:e614f7875b60 272 void
iva2k 0:e614f7875b60 273 mem_init(void)
iva2k 0:e614f7875b60 274 {
iva2k 0:e614f7875b60 275 struct mem *mem;
iva2k 0:e614f7875b60 276
iva2k 0:e614f7875b60 277 LWIP_ASSERT("Sanity check alignment",
iva2k 0:e614f7875b60 278 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
iva2k 0:e614f7875b60 279
iva2k 0:e614f7875b60 280 /* align the heap */
iva2k 0:e614f7875b60 281 ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
iva2k 0:e614f7875b60 282 /* initialize the start of the heap */
iva2k 0:e614f7875b60 283 mem = (struct mem *)(void *)ram;
iva2k 0:e614f7875b60 284 mem->next = MEM_SIZE_ALIGNED;
iva2k 0:e614f7875b60 285 mem->prev = 0;
iva2k 0:e614f7875b60 286 mem->used = 0;
iva2k 0:e614f7875b60 287 /* initialize the end of the heap */
iva2k 0:e614f7875b60 288 ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
iva2k 0:e614f7875b60 289 ram_end->used = 1;
iva2k 0:e614f7875b60 290 ram_end->next = MEM_SIZE_ALIGNED;
iva2k 0:e614f7875b60 291 ram_end->prev = MEM_SIZE_ALIGNED;
iva2k 0:e614f7875b60 292
iva2k 0:e614f7875b60 293 /* initialize the lowest-free pointer to the start of the heap */
iva2k 0:e614f7875b60 294 lfree = (struct mem *)(void *)ram;
iva2k 0:e614f7875b60 295
iva2k 0:e614f7875b60 296 MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
iva2k 0:e614f7875b60 297
iva2k 0:e614f7875b60 298 if(sys_mutex_new(&mem_mutex) != ERR_OK) {
iva2k 0:e614f7875b60 299 LWIP_ASSERT("failed to create mem_mutex", 0);
iva2k 0:e614f7875b60 300 }
iva2k 0:e614f7875b60 301 }
iva2k 0:e614f7875b60 302
/**
 * Put a struct mem back on the heap.
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* pointers outside the heap are counted in the stats and otherwise
     ignored (in release builds the asserts above compile away) */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem (the header sits directly before
     the user data) ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  /* freed bytes = user data size of this block (next offset minus own offset) */
  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* tell a concurrently running mem_malloc() that the heap changed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
iva2k 0:e614f7875b60 356
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* out-of-heap pointers are counted in the stats and returned unchanged */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer (byte offset into the ram array) */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  /* current user data size of this block */
  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it back a little,
       growing the free region by the trimmed-off bytes */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      /* lfree pointed at the struct we are about to move; follow it */
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      /* the newly created free block is now the lowest one */
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* tell a concurrently running mem_malloc() that the heap changed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
iva2k 0:e614f7875b60 482
iva2k 0:e614f7875b60 483 /**
iva2k 0:e614f7875b60 484 * Adam's mem_malloc() plus solution for bug #17922
iva2k 0:e614f7875b60 485 * Allocate a block of memory with a minimum of 'size' bytes.
iva2k 0:e614f7875b60 486 *
iva2k 0:e614f7875b60 487 * @param size is the minimum size of the requested block in bytes.
iva2k 0:e614f7875b60 488 * @return pointer to allocated memory or NULL if no free memory was found.
iva2k 0:e614f7875b60 489 *
iva2k 0:e614f7875b60 490 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
iva2k 0:e614f7875b60 491 */
iva2k 0:e614f7875b60 492 void *
iva2k 0:e614f7875b60 493 mem_malloc(mem_size_t size)
iva2k 0:e614f7875b60 494 {
iva2k 0:e614f7875b60 495 mem_size_t ptr, ptr2;
iva2k 0:e614f7875b60 496 struct mem *mem, *mem2;
iva2k 0:e614f7875b60 497 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
iva2k 0:e614f7875b60 498 u8_t local_mem_free_count = 0;
iva2k 0:e614f7875b60 499 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
iva2k 0:e614f7875b60 500 LWIP_MEM_ALLOC_DECL_PROTECT();
iva2k 0:e614f7875b60 501
iva2k 0:e614f7875b60 502 if (size == 0) {
iva2k 0:e614f7875b60 503 return NULL;
iva2k 0:e614f7875b60 504 }
iva2k 0:e614f7875b60 505
iva2k 0:e614f7875b60 506 /* Expand the size of the allocated memory region so that we can
iva2k 0:e614f7875b60 507 adjust for alignment. */
iva2k 0:e614f7875b60 508 size = LWIP_MEM_ALIGN_SIZE(size);
iva2k 0:e614f7875b60 509
iva2k 0:e614f7875b60 510 if(size < MIN_SIZE_ALIGNED) {
iva2k 0:e614f7875b60 511 /* every data block must be at least MIN_SIZE_ALIGNED long */
iva2k 0:e614f7875b60 512 size = MIN_SIZE_ALIGNED;
iva2k 0:e614f7875b60 513 }
iva2k 0:e614f7875b60 514
iva2k 0:e614f7875b60 515 if (size > MEM_SIZE_ALIGNED) {
iva2k 0:e614f7875b60 516 return NULL;
iva2k 0:e614f7875b60 517 }
iva2k 0:e614f7875b60 518
iva2k 0:e614f7875b60 519 /* protect the heap from concurrent access */
iva2k 0:e614f7875b60 520 sys_mutex_lock(&mem_mutex);
iva2k 0:e614f7875b60 521 LWIP_MEM_ALLOC_PROTECT();
iva2k 0:e614f7875b60 522 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
iva2k 0:e614f7875b60 523 /* run as long as a mem_free disturbed mem_malloc */
iva2k 0:e614f7875b60 524 do {
iva2k 0:e614f7875b60 525 local_mem_free_count = 0;
iva2k 0:e614f7875b60 526 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
iva2k 0:e614f7875b60 527
iva2k 0:e614f7875b60 528 /* Scan through the heap searching for a free block that is big enough,
iva2k 0:e614f7875b60 529 * beginning with the lowest free block.
iva2k 0:e614f7875b60 530 */
iva2k 0:e614f7875b60 531 for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
iva2k 0:e614f7875b60 532 ptr = ((struct mem *)(void *)&ram[ptr])->next) {
iva2k 0:e614f7875b60 533 mem = (struct mem *)(void *)&ram[ptr];
iva2k 0:e614f7875b60 534 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
iva2k 0:e614f7875b60 535 mem_free_count = 0;
iva2k 0:e614f7875b60 536 LWIP_MEM_ALLOC_UNPROTECT();
iva2k 0:e614f7875b60 537 /* allow mem_free to run */
iva2k 0:e614f7875b60 538 LWIP_MEM_ALLOC_PROTECT();
iva2k 0:e614f7875b60 539 if (mem_free_count != 0) {
iva2k 0:e614f7875b60 540 local_mem_free_count = mem_free_count;
iva2k 0:e614f7875b60 541 }
iva2k 0:e614f7875b60 542 mem_free_count = 0;
iva2k 0:e614f7875b60 543 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
iva2k 0:e614f7875b60 544
iva2k 0:e614f7875b60 545 if ((!mem->used) &&
iva2k 0:e614f7875b60 546 (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
iva2k 0:e614f7875b60 547 /* mem is not used and at least perfect fit is possible:
iva2k 0:e614f7875b60 548 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
iva2k 0:e614f7875b60 549
iva2k 0:e614f7875b60 550 if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
iva2k 0:e614f7875b60 551 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
iva2k 0:e614f7875b60 552 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
iva2k 0:e614f7875b60 553 * -> split large block, create empty remainder,
iva2k 0:e614f7875b60 554 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
iva2k 0:e614f7875b60 555 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
iva2k 0:e614f7875b60 556 * struct mem would fit in but no data between mem2 and mem2->next
iva2k 0:e614f7875b60 557 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
iva2k 0:e614f7875b60 558 * region that couldn't hold data, but when mem->next gets freed,
iva2k 0:e614f7875b60 559 * the 2 regions would be combined, resulting in more free memory
iva2k 0:e614f7875b60 560 */
iva2k 0:e614f7875b60 561 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
iva2k 0:e614f7875b60 562 /* create mem2 struct */
iva2k 0:e614f7875b60 563 mem2 = (struct mem *)(void *)&ram[ptr2];
iva2k 0:e614f7875b60 564 mem2->used = 0;
iva2k 0:e614f7875b60 565 mem2->next = mem->next;
iva2k 0:e614f7875b60 566 mem2->prev = ptr;
iva2k 0:e614f7875b60 567 /* and insert it between mem and mem->next */
iva2k 0:e614f7875b60 568 mem->next = ptr2;
iva2k 0:e614f7875b60 569 mem->used = 1;
iva2k 0:e614f7875b60 570
iva2k 0:e614f7875b60 571 if (mem2->next != MEM_SIZE_ALIGNED) {
iva2k 0:e614f7875b60 572 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
iva2k 0:e614f7875b60 573 }
iva2k 0:e614f7875b60 574 MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
iva2k 0:e614f7875b60 575 } else {
iva2k 0:e614f7875b60 576 /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
iva2k 0:e614f7875b60 577 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
iva2k 0:e614f7875b60 578 * take care of this).
iva2k 0:e614f7875b60 579 * -> near fit or excact fit: do not split, no mem2 creation
iva2k 0:e614f7875b60 580 * also can't move mem->next directly behind mem, since mem->next
iva2k 0:e614f7875b60 581 * will always be used at this point!
iva2k 0:e614f7875b60 582 */
iva2k 0:e614f7875b60 583 mem->used = 1;
iva2k 0:e614f7875b60 584 MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
iva2k 0:e614f7875b60 585 }
iva2k 0:e614f7875b60 586
iva2k 0:e614f7875b60 587 if (mem == lfree) {
iva2k 0:e614f7875b60 588 /* Find next free block after mem and update lowest free pointer */
iva2k 0:e614f7875b60 589 while (lfree->used && lfree != ram_end) {
iva2k 0:e614f7875b60 590 LWIP_MEM_ALLOC_UNPROTECT();
iva2k 0:e614f7875b60 591 /* prevent high interrupt latency... */
iva2k 0:e614f7875b60 592 LWIP_MEM_ALLOC_PROTECT();
iva2k 0:e614f7875b60 593 lfree = (struct mem *)(void *)&ram[lfree->next];
iva2k 0:e614f7875b60 594 }
iva2k 0:e614f7875b60 595 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
iva2k 0:e614f7875b60 596 }
iva2k 0:e614f7875b60 597 LWIP_MEM_ALLOC_UNPROTECT();
iva2k 0:e614f7875b60 598 sys_mutex_unlock(&mem_mutex);
iva2k 0:e614f7875b60 599 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
iva2k 0:e614f7875b60 600 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
iva2k 0:e614f7875b60 601 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
iva2k 0:e614f7875b60 602 ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
iva2k 0:e614f7875b60 603 LWIP_ASSERT("mem_malloc: sanity check alignment",
iva2k 0:e614f7875b60 604 (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
iva2k 0:e614f7875b60 605
iva2k 0:e614f7875b60 606 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
iva2k 0:e614f7875b60 607 }
iva2k 0:e614f7875b60 608 }
iva2k 0:e614f7875b60 609 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
iva2k 0:e614f7875b60 610 /* if we got interrupted by a mem_free, try again */
iva2k 0:e614f7875b60 611 } while(local_mem_free_count != 0);
iva2k 0:e614f7875b60 612 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
iva2k 0:e614f7875b60 613 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
iva2k 0:e614f7875b60 614 MEM_STATS_INC(err);
iva2k 0:e614f7875b60 615 LWIP_MEM_ALLOC_UNPROTECT();
iva2k 0:e614f7875b60 616 sys_mutex_unlock(&mem_mutex);
iva2k 0:e614f7875b60 617 return NULL;
iva2k 0:e614f7875b60 618 }
iva2k 0:e614f7875b60 619
iva2k 0:e614f7875b60 620 #endif /* MEM_USE_POOLS */
iva2k 0:e614f7875b60 621 /**
iva2k 0:e614f7875b60 622 * Contiguously allocates enough space for count objects that are size bytes
iva2k 0:e614f7875b60 623 * of memory each and returns a pointer to the allocated memory.
iva2k 0:e614f7875b60 624 *
iva2k 0:e614f7875b60 625 * The allocated memory is filled with bytes of value zero.
iva2k 0:e614f7875b60 626 *
iva2k 0:e614f7875b60 627 * @param count number of objects to allocate
iva2k 0:e614f7875b60 628 * @param size size of the objects to allocate
iva2k 0:e614f7875b60 629 * @return pointer to allocated memory / NULL pointer if there is an error
iva2k 0:e614f7875b60 630 */
iva2k 0:e614f7875b60 631 void *mem_calloc(mem_size_t count, mem_size_t size)
iva2k 0:e614f7875b60 632 {
iva2k 0:e614f7875b60 633 void *p;
iva2k 0:e614f7875b60 634
iva2k 0:e614f7875b60 635 /* allocate 'count' objects of size 'size' */
iva2k 0:e614f7875b60 636 p = mem_malloc(count * size);
iva2k 0:e614f7875b60 637 if (p) {
iva2k 0:e614f7875b60 638 /* zero the memory */
iva2k 0:e614f7875b60 639 memset(p, 0, count * size);
iva2k 0:e614f7875b60 640 }
iva2k 0:e614f7875b60 641 return p;
iva2k 0:e614f7875b60 642 }
iva2k 0:e614f7875b60 643
iva2k 0:e614f7875b60 644 #endif /* !MEM_LIBC_MALLOC */