Forked from STM32F7 Ethernet interface for Nucleo F746ZG
Dependents: Nucleo_F746ZG_Ethernet_MQTT_Ultrasound
Fork of F7_Ethernet by DieterGraef
lwip/core/mem.c@2:b4727195c450, 2016-10-24 (annotated)
- Committer: EmbeddedSam
- Date: Mon Oct 24 12:59:21 2016 +0000
- Revision: 2:b4727195c450
- Parent: 0:d26c1b55cfca
Working with 1 ultrasound sensor, approx. 10 Hz update rate over MQTT
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
DieterGraef | 0:d26c1b55cfca | 1 | /** |
DieterGraef | 0:d26c1b55cfca | 2 | * @file |
DieterGraef | 0:d26c1b55cfca | 3 | * Dynamic memory manager |
DieterGraef | 0:d26c1b55cfca | 4 | * |
DieterGraef | 0:d26c1b55cfca | 5 | * This is a lightweight replacement for the standard C library malloc(). |
DieterGraef | 0:d26c1b55cfca | 6 | * |
DieterGraef | 0:d26c1b55cfca | 7 | * If you want to use the standard C library malloc() instead, define |
DieterGraef | 0:d26c1b55cfca | 8 | * MEM_LIBC_MALLOC to 1 in your lwipopts.h |
DieterGraef | 0:d26c1b55cfca | 9 | * |
DieterGraef | 0:d26c1b55cfca | 10 | * To let mem_malloc() use pools (prevents fragmentation and is much faster than |
DieterGraef | 0:d26c1b55cfca | 11 | * a heap but might waste some memory), define MEM_USE_POOLS to 1, define |
DieterGraef | 0:d26c1b55cfca | 12 | * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list |
DieterGraef | 0:d26c1b55cfca | 13 | * of pools like this (more pools can be added between _START and _END): |
DieterGraef | 0:d26c1b55cfca | 14 | * |
DieterGraef | 0:d26c1b55cfca | 15 | * Define three pools with sizes 256, 512, and 1512 bytes |
DieterGraef | 0:d26c1b55cfca | 16 | * LWIP_MALLOC_MEMPOOL_START |
DieterGraef | 0:d26c1b55cfca | 17 | * LWIP_MALLOC_MEMPOOL(20, 256) |
DieterGraef | 0:d26c1b55cfca | 18 | * LWIP_MALLOC_MEMPOOL(10, 512) |
DieterGraef | 0:d26c1b55cfca | 19 | * LWIP_MALLOC_MEMPOOL(5, 1512) |
DieterGraef | 0:d26c1b55cfca | 20 | * LWIP_MALLOC_MEMPOOL_END |
DieterGraef | 0:d26c1b55cfca | 21 | */ |
DieterGraef | 0:d26c1b55cfca | 22 | |
DieterGraef | 0:d26c1b55cfca | 23 | /* |
DieterGraef | 0:d26c1b55cfca | 24 | * Copyright (c) 2001-2004 Swedish Institute of Computer Science. |
DieterGraef | 0:d26c1b55cfca | 25 | * All rights reserved. |
DieterGraef | 0:d26c1b55cfca | 26 | * |
DieterGraef | 0:d26c1b55cfca | 27 | * Redistribution and use in source and binary forms, with or without modification, |
DieterGraef | 0:d26c1b55cfca | 28 | * are permitted provided that the following conditions are met: |
DieterGraef | 0:d26c1b55cfca | 29 | * |
DieterGraef | 0:d26c1b55cfca | 30 | * 1. Redistributions of source code must retain the above copyright notice, |
DieterGraef | 0:d26c1b55cfca | 31 | * this list of conditions and the following disclaimer. |
DieterGraef | 0:d26c1b55cfca | 32 | * 2. Redistributions in binary form must reproduce the above copyright notice, |
DieterGraef | 0:d26c1b55cfca | 33 | * this list of conditions and the following disclaimer in the documentation |
DieterGraef | 0:d26c1b55cfca | 34 | * and/or other materials provided with the distribution. |
DieterGraef | 0:d26c1b55cfca | 35 | * 3. The name of the author may not be used to endorse or promote products |
DieterGraef | 0:d26c1b55cfca | 36 | * derived from this software without specific prior written permission. |
DieterGraef | 0:d26c1b55cfca | 37 | * |
DieterGraef | 0:d26c1b55cfca | 38 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
DieterGraef | 0:d26c1b55cfca | 39 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
DieterGraef | 0:d26c1b55cfca | 40 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT |
DieterGraef | 0:d26c1b55cfca | 41 | * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
DieterGraef | 0:d26c1b55cfca | 42 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT |
DieterGraef | 0:d26c1b55cfca | 43 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
DieterGraef | 0:d26c1b55cfca | 44 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
DieterGraef | 0:d26c1b55cfca | 45 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
DieterGraef | 0:d26c1b55cfca | 46 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY |
DieterGraef | 0:d26c1b55cfca | 47 | * OF SUCH DAMAGE. |
DieterGraef | 0:d26c1b55cfca | 48 | * |
DieterGraef | 0:d26c1b55cfca | 49 | * This file is part of the lwIP TCP/IP stack. |
DieterGraef | 0:d26c1b55cfca | 50 | * |
DieterGraef | 0:d26c1b55cfca | 51 | * Author: Adam Dunkels <adam@sics.se> |
DieterGraef | 0:d26c1b55cfca | 52 | * Simon Goldschmidt |
DieterGraef | 0:d26c1b55cfca | 53 | * |
DieterGraef | 0:d26c1b55cfca | 54 | */ |
DieterGraef | 0:d26c1b55cfca | 55 | |
DieterGraef | 0:d26c1b55cfca | 56 | #include "lwip/opt.h" |
DieterGraef | 0:d26c1b55cfca | 57 | |
DieterGraef | 0:d26c1b55cfca | 58 | #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */ |
DieterGraef | 0:d26c1b55cfca | 59 | |
DieterGraef | 0:d26c1b55cfca | 60 | #include "lwip/def.h" |
DieterGraef | 0:d26c1b55cfca | 61 | #include "lwip/mem.h" |
DieterGraef | 0:d26c1b55cfca | 62 | #include "lwip/sys.h" |
DieterGraef | 0:d26c1b55cfca | 63 | #include "lwip/stats.h" |
DieterGraef | 0:d26c1b55cfca | 64 | #include "lwip/err.h" |
DieterGraef | 0:d26c1b55cfca | 65 | |
DieterGraef | 0:d26c1b55cfca | 66 | #include <string.h> |
DieterGraef | 0:d26c1b55cfca | 67 | |
DieterGraef | 0:d26c1b55cfca | 68 | #if MEM_USE_POOLS |
DieterGraef | 0:d26c1b55cfca | 69 | /* lwIP heap implemented with different sized pools */ |
DieterGraef | 0:d26c1b55cfca | 70 | |
DieterGraef | 0:d26c1b55cfca | 71 | /** |
DieterGraef | 0:d26c1b55cfca | 72 | * Allocate memory: determine the smallest pool that is big enough |
DieterGraef | 0:d26c1b55cfca | 73 | * to contain an element of 'size' and get an element from that pool. |
DieterGraef | 0:d26c1b55cfca | 74 | * |
DieterGraef | 0:d26c1b55cfca | 75 | * @param size the size in bytes of the memory needed |
DieterGraef | 0:d26c1b55cfca | 76 | * @return a pointer to the allocated memory or NULL if the pool is empty |
DieterGraef | 0:d26c1b55cfca | 77 | */ |
DieterGraef | 0:d26c1b55cfca | 78 | void * |
DieterGraef | 0:d26c1b55cfca | 79 | mem_malloc(mem_size_t size) |
DieterGraef | 0:d26c1b55cfca | 80 | { |
DieterGraef | 0:d26c1b55cfca | 81 | void *ret; |
DieterGraef | 0:d26c1b55cfca | 82 | struct memp_malloc_helper *element; |
DieterGraef | 0:d26c1b55cfca | 83 | memp_t poolnr; |
DieterGraef | 0:d26c1b55cfca | 84 | mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); |
DieterGraef | 0:d26c1b55cfca | 85 | |
DieterGraef | 0:d26c1b55cfca | 86 | for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { |
DieterGraef | 0:d26c1b55cfca | 87 | #if MEM_USE_POOLS_TRY_BIGGER_POOL |
DieterGraef | 0:d26c1b55cfca | 88 | again: |
DieterGraef | 0:d26c1b55cfca | 89 | #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ |
DieterGraef | 0:d26c1b55cfca | 90 | /* is this pool big enough to hold an element of the required size |
DieterGraef | 0:d26c1b55cfca | 91 | plus a struct memp_malloc_helper that saves the pool this element came from? */ |
DieterGraef | 0:d26c1b55cfca | 92 | if (required_size <= memp_sizes[poolnr]) { |
DieterGraef | 0:d26c1b55cfca | 93 | break; |
DieterGraef | 0:d26c1b55cfca | 94 | } |
DieterGraef | 0:d26c1b55cfca | 95 | } |
DieterGraef | 0:d26c1b55cfca | 96 | if (poolnr > MEMP_POOL_LAST) { |
DieterGraef | 0:d26c1b55cfca | 97 | LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); |
DieterGraef | 0:d26c1b55cfca | 98 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 99 | } |
DieterGraef | 0:d26c1b55cfca | 100 | element = (struct memp_malloc_helper*)memp_malloc(poolnr); |
DieterGraef | 0:d26c1b55cfca | 101 | if (element == NULL) { |
DieterGraef | 0:d26c1b55cfca | 102 | /* No need to DEBUGF or ASSERT: This error is already |
DieterGraef | 0:d26c1b55cfca | 103 | taken care of in memp.c */ |
DieterGraef | 0:d26c1b55cfca | 104 | #if MEM_USE_POOLS_TRY_BIGGER_POOL |
DieterGraef | 0:d26c1b55cfca | 105 | /** Try a bigger pool if this one is empty! */ |
DieterGraef | 0:d26c1b55cfca | 106 | if (poolnr < MEMP_POOL_LAST) { |
DieterGraef | 0:d26c1b55cfca | 107 | poolnr++; |
DieterGraef | 0:d26c1b55cfca | 108 | goto again; |
DieterGraef | 0:d26c1b55cfca | 109 | } |
DieterGraef | 0:d26c1b55cfca | 110 | #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ |
DieterGraef | 0:d26c1b55cfca | 111 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 112 | } |
DieterGraef | 0:d26c1b55cfca | 113 | |
DieterGraef | 0:d26c1b55cfca | 114 | /* save the pool number this element came from */ |
DieterGraef | 0:d26c1b55cfca | 115 | element->poolnr = poolnr; |
DieterGraef | 0:d26c1b55cfca | 116 | /* and return a pointer to the memory directly after the struct memp_malloc_helper */ |
DieterGraef | 0:d26c1b55cfca | 117 | ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); |
DieterGraef | 0:d26c1b55cfca | 118 | |
DieterGraef | 0:d26c1b55cfca | 119 | return ret; |
DieterGraef | 0:d26c1b55cfca | 120 | } |
DieterGraef | 0:d26c1b55cfca | 121 | |
DieterGraef | 0:d26c1b55cfca | 122 | /** |
DieterGraef | 0:d26c1b55cfca | 123 | * Free memory previously allocated by mem_malloc. Loads the pool number |
DieterGraef | 0:d26c1b55cfca | 124 | * and calls memp_free with that pool number to put the element back into |
DieterGraef | 0:d26c1b55cfca | 125 | * its pool |
DieterGraef | 0:d26c1b55cfca | 126 | * |
DieterGraef | 0:d26c1b55cfca | 127 | * @param rmem the memory element to free |
DieterGraef | 0:d26c1b55cfca | 128 | */ |
DieterGraef | 0:d26c1b55cfca | 129 | void |
DieterGraef | 0:d26c1b55cfca | 130 | mem_free(void *rmem) |
DieterGraef | 0:d26c1b55cfca | 131 | { |
DieterGraef | 0:d26c1b55cfca | 132 | struct memp_malloc_helper *hmem; |
DieterGraef | 0:d26c1b55cfca | 133 | |
DieterGraef | 0:d26c1b55cfca | 134 | LWIP_ASSERT("rmem != NULL", (rmem != NULL)); |
DieterGraef | 0:d26c1b55cfca | 135 | LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); |
DieterGraef | 0:d26c1b55cfca | 136 | |
DieterGraef | 0:d26c1b55cfca | 137 | /* get the original struct memp_malloc_helper */ |
DieterGraef | 0:d26c1b55cfca | 138 | hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); |
DieterGraef | 0:d26c1b55cfca | 139 | |
DieterGraef | 0:d26c1b55cfca | 140 | LWIP_ASSERT("hmem != NULL", (hmem != NULL)); |
DieterGraef | 0:d26c1b55cfca | 141 | LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); |
DieterGraef | 0:d26c1b55cfca | 142 | LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); |
DieterGraef | 0:d26c1b55cfca | 143 | |
DieterGraef | 0:d26c1b55cfca | 144 | /* and put it in the pool we saved earlier */ |
DieterGraef | 0:d26c1b55cfca | 145 | memp_free(hmem->poolnr, hmem); |
DieterGraef | 0:d26c1b55cfca | 146 | } |
DieterGraef | 0:d26c1b55cfca | 147 | |
DieterGraef | 0:d26c1b55cfca | 148 | #else /* MEM_USE_POOLS */ |
DieterGraef | 0:d26c1b55cfca | 149 | /* lwIP replacement for your libc malloc() */ |
DieterGraef | 0:d26c1b55cfca | 150 | |
DieterGraef | 0:d26c1b55cfca | 151 | /** |
DieterGraef | 0:d26c1b55cfca | 152 | * The heap is made up as a list of structs of this type. |
DieterGraef | 0:d26c1b55cfca | 153 | * This does not have to be aligned since for getting its size, |
DieterGraef | 0:d26c1b55cfca | 154 | * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. |
DieterGraef | 0:d26c1b55cfca | 155 | */ |
DieterGraef | 0:d26c1b55cfca | 156 | struct mem { |
DieterGraef | 0:d26c1b55cfca | 157 | /** index (-> ram[next]) of the next struct */ |
DieterGraef | 0:d26c1b55cfca | 158 | mem_size_t next; |
DieterGraef | 0:d26c1b55cfca | 159 | /** index (-> ram[prev]) of the previous struct */ |
DieterGraef | 0:d26c1b55cfca | 160 | mem_size_t prev; |
DieterGraef | 0:d26c1b55cfca | 161 | /** 1: this area is used; 0: this area is unused */ |
DieterGraef | 0:d26c1b55cfca | 162 | u8_t used; |
DieterGraef | 0:d26c1b55cfca | 163 | }; |
DieterGraef | 0:d26c1b55cfca | 164 | |
DieterGraef | 0:d26c1b55cfca | 165 | /** All allocated blocks will be MIN_SIZE bytes big, at least! |
DieterGraef | 0:d26c1b55cfca | 166 | * MIN_SIZE can be overridden to suit your needs. Smaller values save space, |
DieterGraef | 0:d26c1b55cfca | 167 | * larger values could prevent too small blocks from fragmenting the RAM too much. */ |
DieterGraef | 0:d26c1b55cfca | 168 | #ifndef MIN_SIZE |
DieterGraef | 0:d26c1b55cfca | 169 | #define MIN_SIZE 12 |
DieterGraef | 0:d26c1b55cfca | 170 | #endif /* MIN_SIZE */ |
DieterGraef | 0:d26c1b55cfca | 171 | /* some alignment macros: we define them here for better source code layout */ |
DieterGraef | 0:d26c1b55cfca | 172 | #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) |
DieterGraef | 0:d26c1b55cfca | 173 | #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) |
DieterGraef | 0:d26c1b55cfca | 174 | #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) |
DieterGraef | 0:d26c1b55cfca | 175 | |
DieterGraef | 0:d26c1b55cfca | 176 | /** If you want to relocate the heap to external memory, simply define |
DieterGraef | 0:d26c1b55cfca | 177 | * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. |
DieterGraef | 0:d26c1b55cfca | 178 | * If so, make sure the memory at that location is big enough (see below on |
DieterGraef | 0:d26c1b55cfca | 179 | * how that space is calculated). */ |
DieterGraef | 0:d26c1b55cfca | 180 | #ifndef LWIP_RAM_HEAP_POINTER |
DieterGraef | 0:d26c1b55cfca | 181 | /** the heap. we need one struct mem at the end and some room for alignment */ |
DieterGraef | 0:d26c1b55cfca | 182 | u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT]; |
DieterGraef | 0:d26c1b55cfca | 183 | #define LWIP_RAM_HEAP_POINTER ram_heap |
DieterGraef | 0:d26c1b55cfca | 184 | #endif /* LWIP_RAM_HEAP_POINTER */ |
DieterGraef | 0:d26c1b55cfca | 185 | |
DieterGraef | 0:d26c1b55cfca | 186 | /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ |
DieterGraef | 0:d26c1b55cfca | 187 | static u8_t *ram; |
DieterGraef | 0:d26c1b55cfca | 188 | /** the last entry, always unused! */ |
DieterGraef | 0:d26c1b55cfca | 189 | static struct mem *ram_end; |
DieterGraef | 0:d26c1b55cfca | 190 | /** pointer to the lowest free block, this is used for faster search */ |
DieterGraef | 0:d26c1b55cfca | 191 | static struct mem *lfree; |
DieterGraef | 0:d26c1b55cfca | 192 | |
DieterGraef | 0:d26c1b55cfca | 193 | /** concurrent access protection */ |
DieterGraef | 0:d26c1b55cfca | 194 | #if !NO_SYS |
DieterGraef | 0:d26c1b55cfca | 195 | static sys_mutex_t mem_mutex; |
DieterGraef | 0:d26c1b55cfca | 196 | #endif |
DieterGraef | 0:d26c1b55cfca | 197 | |
DieterGraef | 0:d26c1b55cfca | 198 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 199 | |
DieterGraef | 0:d26c1b55cfca | 200 | static volatile u8_t mem_free_count; |
DieterGraef | 0:d26c1b55cfca | 201 | |
DieterGraef | 0:d26c1b55cfca | 202 | /* Allow mem_free from other (e.g. interrupt) context */ |
DieterGraef | 0:d26c1b55cfca | 203 | #define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) |
DieterGraef | 0:d26c1b55cfca | 204 | #define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) |
DieterGraef | 0:d26c1b55cfca | 205 | #define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) |
DieterGraef | 0:d26c1b55cfca | 206 | #define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) |
DieterGraef | 0:d26c1b55cfca | 207 | #define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) |
DieterGraef | 0:d26c1b55cfca | 208 | #define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) |
DieterGraef | 0:d26c1b55cfca | 209 | |
DieterGraef | 0:d26c1b55cfca | 210 | #else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 211 | |
DieterGraef | 0:d26c1b55cfca | 212 | /* Protect the heap only by using a semaphore */ |
DieterGraef | 0:d26c1b55cfca | 213 | #define LWIP_MEM_FREE_DECL_PROTECT() |
DieterGraef | 0:d26c1b55cfca | 214 | #define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) |
DieterGraef | 0:d26c1b55cfca | 215 | #define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) |
DieterGraef | 0:d26c1b55cfca | 216 | /* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */ |
DieterGraef | 0:d26c1b55cfca | 217 | #define LWIP_MEM_ALLOC_DECL_PROTECT() |
DieterGraef | 0:d26c1b55cfca | 218 | #define LWIP_MEM_ALLOC_PROTECT() |
DieterGraef | 0:d26c1b55cfca | 219 | #define LWIP_MEM_ALLOC_UNPROTECT() |
DieterGraef | 0:d26c1b55cfca | 220 | |
DieterGraef | 0:d26c1b55cfca | 221 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 222 | |
DieterGraef | 0:d26c1b55cfca | 223 | |
DieterGraef | 0:d26c1b55cfca | 224 | /** |
DieterGraef | 0:d26c1b55cfca | 225 | * "Plug holes" by combining adjacent empty struct mems. |
DieterGraef | 0:d26c1b55cfca | 226 | * After this function is through, there should not exist |
DieterGraef | 0:d26c1b55cfca | 227 | * one empty struct mem pointing to another empty struct mem. |
DieterGraef | 0:d26c1b55cfca | 228 | * |
DieterGraef | 0:d26c1b55cfca | 229 | * @param mem this points to a struct mem which just has been freed |
DieterGraef | 0:d26c1b55cfca | 230 | * @internal this function is only called by mem_free() and mem_trim() |
DieterGraef | 0:d26c1b55cfca | 231 | * |
DieterGraef | 0:d26c1b55cfca | 232 | * This assumes access to the heap is protected by the calling function |
DieterGraef | 0:d26c1b55cfca | 233 | * already. |
DieterGraef | 0:d26c1b55cfca | 234 | */ |
DieterGraef | 0:d26c1b55cfca | 235 | static void |
DieterGraef | 0:d26c1b55cfca | 236 | plug_holes(struct mem *mem) |
DieterGraef | 0:d26c1b55cfca | 237 | { |
DieterGraef | 0:d26c1b55cfca | 238 | struct mem *nmem; |
DieterGraef | 0:d26c1b55cfca | 239 | struct mem *pmem; |
DieterGraef | 0:d26c1b55cfca | 240 | |
DieterGraef | 0:d26c1b55cfca | 241 | LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); |
DieterGraef | 0:d26c1b55cfca | 242 | LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); |
DieterGraef | 0:d26c1b55cfca | 243 | LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); |
DieterGraef | 0:d26c1b55cfca | 244 | |
DieterGraef | 0:d26c1b55cfca | 245 | /* plug hole forward */ |
DieterGraef | 0:d26c1b55cfca | 246 | LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); |
DieterGraef | 0:d26c1b55cfca | 247 | |
DieterGraef | 0:d26c1b55cfca | 248 | nmem = (struct mem *)(void *)&ram[mem->next]; |
DieterGraef | 0:d26c1b55cfca | 249 | if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { |
DieterGraef | 0:d26c1b55cfca | 250 | /* if mem->next is unused and not end of ram, combine mem and mem->next */ |
DieterGraef | 0:d26c1b55cfca | 251 | if (lfree == nmem) { |
DieterGraef | 0:d26c1b55cfca | 252 | lfree = mem; |
DieterGraef | 0:d26c1b55cfca | 253 | } |
DieterGraef | 0:d26c1b55cfca | 254 | mem->next = nmem->next; |
DieterGraef | 0:d26c1b55cfca | 255 | ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram); |
DieterGraef | 0:d26c1b55cfca | 256 | } |
DieterGraef | 0:d26c1b55cfca | 257 | |
DieterGraef | 0:d26c1b55cfca | 258 | /* plug hole backward */ |
DieterGraef | 0:d26c1b55cfca | 259 | pmem = (struct mem *)(void *)&ram[mem->prev]; |
DieterGraef | 0:d26c1b55cfca | 260 | if (pmem != mem && pmem->used == 0) { |
DieterGraef | 0:d26c1b55cfca | 261 | /* if mem->prev is unused, combine mem and mem->prev */ |
DieterGraef | 0:d26c1b55cfca | 262 | if (lfree == mem) { |
DieterGraef | 0:d26c1b55cfca | 263 | lfree = pmem; |
DieterGraef | 0:d26c1b55cfca | 264 | } |
DieterGraef | 0:d26c1b55cfca | 265 | pmem->next = mem->next; |
DieterGraef | 0:d26c1b55cfca | 266 | ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram); |
DieterGraef | 0:d26c1b55cfca | 267 | } |
DieterGraef | 0:d26c1b55cfca | 268 | } |
DieterGraef | 0:d26c1b55cfca | 269 | |
DieterGraef | 0:d26c1b55cfca | 270 | /** |
DieterGraef | 0:d26c1b55cfca | 271 | * Zero the heap and initialize start, end and lowest-free |
DieterGraef | 0:d26c1b55cfca | 272 | */ |
DieterGraef | 0:d26c1b55cfca | 273 | void |
DieterGraef | 0:d26c1b55cfca | 274 | mem_init(void) |
DieterGraef | 0:d26c1b55cfca | 275 | { |
DieterGraef | 0:d26c1b55cfca | 276 | struct mem *mem; |
DieterGraef | 0:d26c1b55cfca | 277 | |
DieterGraef | 0:d26c1b55cfca | 278 | LWIP_ASSERT("Sanity check alignment", |
DieterGraef | 0:d26c1b55cfca | 279 | (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0); |
DieterGraef | 0:d26c1b55cfca | 280 | |
DieterGraef | 0:d26c1b55cfca | 281 | /* align the heap */ |
DieterGraef | 0:d26c1b55cfca | 282 | ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); |
DieterGraef | 0:d26c1b55cfca | 283 | /* initialize the start of the heap */ |
DieterGraef | 0:d26c1b55cfca | 284 | mem = (struct mem *)(void *)ram; |
DieterGraef | 0:d26c1b55cfca | 285 | mem->next = MEM_SIZE_ALIGNED; |
DieterGraef | 0:d26c1b55cfca | 286 | mem->prev = 0; |
DieterGraef | 0:d26c1b55cfca | 287 | mem->used = 0; |
DieterGraef | 0:d26c1b55cfca | 288 | /* initialize the end of the heap */ |
DieterGraef | 0:d26c1b55cfca | 289 | ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED]; |
DieterGraef | 0:d26c1b55cfca | 290 | ram_end->used = 1; |
DieterGraef | 0:d26c1b55cfca | 291 | ram_end->next = MEM_SIZE_ALIGNED; |
DieterGraef | 0:d26c1b55cfca | 292 | ram_end->prev = MEM_SIZE_ALIGNED; |
DieterGraef | 0:d26c1b55cfca | 293 | |
DieterGraef | 0:d26c1b55cfca | 294 | /* initialize the lowest-free pointer to the start of the heap */ |
DieterGraef | 0:d26c1b55cfca | 295 | lfree = (struct mem *)(void *)ram; |
DieterGraef | 0:d26c1b55cfca | 296 | |
DieterGraef | 0:d26c1b55cfca | 297 | MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); |
DieterGraef | 0:d26c1b55cfca | 298 | |
DieterGraef | 0:d26c1b55cfca | 299 | if(sys_mutex_new(&mem_mutex) != ERR_OK) { |
DieterGraef | 0:d26c1b55cfca | 300 | LWIP_ASSERT("failed to create mem_mutex", 0); |
DieterGraef | 0:d26c1b55cfca | 301 | } |
DieterGraef | 0:d26c1b55cfca | 302 | } |
DieterGraef | 0:d26c1b55cfca | 303 | |
DieterGraef | 0:d26c1b55cfca | 304 | /** |
DieterGraef | 0:d26c1b55cfca | 305 | * Put a struct mem back on the heap |
DieterGraef | 0:d26c1b55cfca | 306 | * |
DieterGraef | 0:d26c1b55cfca | 307 | * @param rmem is the data portion of a struct mem as returned by a previous |
DieterGraef | 0:d26c1b55cfca | 308 | * call to mem_malloc() |
DieterGraef | 0:d26c1b55cfca | 309 | */ |
DieterGraef | 0:d26c1b55cfca | 310 | void |
DieterGraef | 0:d26c1b55cfca | 311 | mem_free(void *rmem) |
DieterGraef | 0:d26c1b55cfca | 312 | { |
DieterGraef | 0:d26c1b55cfca | 313 | struct mem *mem; |
DieterGraef | 0:d26c1b55cfca | 314 | LWIP_MEM_FREE_DECL_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 315 | |
DieterGraef | 0:d26c1b55cfca | 316 | if (rmem == NULL) { |
DieterGraef | 0:d26c1b55cfca | 317 | LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); |
DieterGraef | 0:d26c1b55cfca | 318 | return; |
DieterGraef | 0:d26c1b55cfca | 319 | } |
DieterGraef | 0:d26c1b55cfca | 320 | LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0); |
DieterGraef | 0:d26c1b55cfca | 321 | |
DieterGraef | 0:d26c1b55cfca | 322 | LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram && |
DieterGraef | 0:d26c1b55cfca | 323 | (u8_t *)rmem < (u8_t *)ram_end); |
DieterGraef | 0:d26c1b55cfca | 324 | |
DieterGraef | 0:d26c1b55cfca | 325 | if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { |
DieterGraef | 0:d26c1b55cfca | 326 | SYS_ARCH_DECL_PROTECT(lev); |
DieterGraef | 0:d26c1b55cfca | 327 | LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); |
DieterGraef | 0:d26c1b55cfca | 328 | /* protect mem stats from concurrent access */ |
DieterGraef | 0:d26c1b55cfca | 329 | SYS_ARCH_PROTECT(lev); |
DieterGraef | 0:d26c1b55cfca | 330 | MEM_STATS_INC(illegal); |
DieterGraef | 0:d26c1b55cfca | 331 | SYS_ARCH_UNPROTECT(lev); |
DieterGraef | 0:d26c1b55cfca | 332 | return; |
DieterGraef | 0:d26c1b55cfca | 333 | } |
DieterGraef | 0:d26c1b55cfca | 334 | /* protect the heap from concurrent access */ |
DieterGraef | 0:d26c1b55cfca | 335 | LWIP_MEM_FREE_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 336 | /* Get the corresponding struct mem ... */ |
DieterGraef | 0:d26c1b55cfca | 337 | mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); |
DieterGraef | 0:d26c1b55cfca | 338 | /* ... which has to be in a used state ... */ |
DieterGraef | 0:d26c1b55cfca | 339 | LWIP_ASSERT("mem_free: mem->used", mem->used); |
DieterGraef | 0:d26c1b55cfca | 340 | /* ... and is now unused. */ |
DieterGraef | 0:d26c1b55cfca | 341 | mem->used = 0; |
DieterGraef | 0:d26c1b55cfca | 342 | |
DieterGraef | 0:d26c1b55cfca | 343 | if (mem < lfree) { |
DieterGraef | 0:d26c1b55cfca | 344 | /* the newly freed struct is now the lowest */ |
DieterGraef | 0:d26c1b55cfca | 345 | lfree = mem; |
DieterGraef | 0:d26c1b55cfca | 346 | } |
DieterGraef | 0:d26c1b55cfca | 347 | |
DieterGraef | 0:d26c1b55cfca | 348 | MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); |
DieterGraef | 0:d26c1b55cfca | 349 | |
DieterGraef | 0:d26c1b55cfca | 350 | /* finally, see if prev or next are free also */ |
DieterGraef | 0:d26c1b55cfca | 351 | plug_holes(mem); |
DieterGraef | 0:d26c1b55cfca | 352 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 353 | mem_free_count = 1; |
DieterGraef | 0:d26c1b55cfca | 354 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 355 | LWIP_MEM_FREE_UNPROTECT(); |
DieterGraef | 0:d26c1b55cfca | 356 | } |
DieterGraef | 0:d26c1b55cfca | 357 | |
DieterGraef | 0:d26c1b55cfca | 358 | /** |
DieterGraef | 0:d26c1b55cfca | 359 | * Shrink memory returned by mem_malloc(). |
DieterGraef | 0:d26c1b55cfca | 360 | * |
DieterGraef | 0:d26c1b55cfca | 361 | * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk |
DieterGraef | 0:d26c1b55cfca | 362 | * @param newsize required size after shrinking (needs to be smaller than or |
DieterGraef | 0:d26c1b55cfca | 363 | * equal to the previous size) |
DieterGraef | 0:d26c1b55cfca | 364 | * @return for compatibility reasons: is always == rmem, at the moment |
DieterGraef | 0:d26c1b55cfca | 365 | * or NULL if newsize is > old size, in which case rmem is NOT touched |
DieterGraef | 0:d26c1b55cfca | 366 | * or freed! |
DieterGraef | 0:d26c1b55cfca | 367 | */ |
DieterGraef | 0:d26c1b55cfca | 368 | void * |
DieterGraef | 0:d26c1b55cfca | 369 | mem_trim(void *rmem, mem_size_t newsize) |
DieterGraef | 0:d26c1b55cfca | 370 | { |
DieterGraef | 0:d26c1b55cfca | 371 | mem_size_t size; |
DieterGraef | 0:d26c1b55cfca | 372 | mem_size_t ptr, ptr2; |
DieterGraef | 0:d26c1b55cfca | 373 | struct mem *mem, *mem2; |
DieterGraef | 0:d26c1b55cfca | 374 | /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ |
DieterGraef | 0:d26c1b55cfca | 375 | LWIP_MEM_FREE_DECL_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 376 | |
DieterGraef | 0:d26c1b55cfca | 377 | /* Expand the size of the allocated memory region so that we can |
DieterGraef | 0:d26c1b55cfca | 378 | adjust for alignment. */ |
DieterGraef | 0:d26c1b55cfca | 379 | newsize = LWIP_MEM_ALIGN_SIZE(newsize); |
DieterGraef | 0:d26c1b55cfca | 380 | |
DieterGraef | 0:d26c1b55cfca | 381 | if(newsize < MIN_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 382 | /* every data block must be at least MIN_SIZE_ALIGNED long */ |
DieterGraef | 0:d26c1b55cfca | 383 | newsize = MIN_SIZE_ALIGNED; |
DieterGraef | 0:d26c1b55cfca | 384 | } |
DieterGraef | 0:d26c1b55cfca | 385 | |
DieterGraef | 0:d26c1b55cfca | 386 | if (newsize > MEM_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 387 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 388 | } |
DieterGraef | 0:d26c1b55cfca | 389 | |
DieterGraef | 0:d26c1b55cfca | 390 | LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && |
DieterGraef | 0:d26c1b55cfca | 391 | (u8_t *)rmem < (u8_t *)ram_end); |
DieterGraef | 0:d26c1b55cfca | 392 | |
DieterGraef | 0:d26c1b55cfca | 393 | if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { |
DieterGraef | 0:d26c1b55cfca | 394 | SYS_ARCH_DECL_PROTECT(lev); |
DieterGraef | 0:d26c1b55cfca | 395 | LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); |
DieterGraef | 0:d26c1b55cfca | 396 | /* protect mem stats from concurrent access */ |
DieterGraef | 0:d26c1b55cfca | 397 | SYS_ARCH_PROTECT(lev); |
DieterGraef | 0:d26c1b55cfca | 398 | MEM_STATS_INC(illegal); |
DieterGraef | 0:d26c1b55cfca | 399 | SYS_ARCH_UNPROTECT(lev); |
DieterGraef | 0:d26c1b55cfca | 400 | return rmem; |
DieterGraef | 0:d26c1b55cfca | 401 | } |
DieterGraef | 0:d26c1b55cfca | 402 | /* Get the corresponding struct mem ... */ |
DieterGraef | 0:d26c1b55cfca | 403 | mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); |
DieterGraef | 0:d26c1b55cfca | 404 | /* ... and its offset pointer */ |
DieterGraef | 0:d26c1b55cfca | 405 | ptr = (mem_size_t)((u8_t *)mem - ram); |
DieterGraef | 0:d26c1b55cfca | 406 | |
DieterGraef | 0:d26c1b55cfca | 407 | size = mem->next - ptr - SIZEOF_STRUCT_MEM; |
DieterGraef | 0:d26c1b55cfca | 408 | LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); |
DieterGraef | 0:d26c1b55cfca | 409 | if (newsize > size) { |
DieterGraef | 0:d26c1b55cfca | 410 | /* not supported */ |
DieterGraef | 0:d26c1b55cfca | 411 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 412 | } |
DieterGraef | 0:d26c1b55cfca | 413 | if (newsize == size) { |
DieterGraef | 0:d26c1b55cfca | 414 | /* No change in size, simply return */ |
DieterGraef | 0:d26c1b55cfca | 415 | return rmem; |
DieterGraef | 0:d26c1b55cfca | 416 | } |
DieterGraef | 0:d26c1b55cfca | 417 | |
DieterGraef | 0:d26c1b55cfca | 418 | /* protect the heap from concurrent access */ |
DieterGraef | 0:d26c1b55cfca | 419 | LWIP_MEM_FREE_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 420 | |
DieterGraef | 0:d26c1b55cfca | 421 | mem2 = (struct mem *)(void *)&ram[mem->next]; |
DieterGraef | 0:d26c1b55cfca | 422 | if(mem2->used == 0) { |
DieterGraef | 0:d26c1b55cfca | 423 | /* The next struct is unused, we can simply move it a little */ |
DieterGraef | 0:d26c1b55cfca | 424 | mem_size_t next; |
DieterGraef | 0:d26c1b55cfca | 425 | /* remember the old next pointer */ |
DieterGraef | 0:d26c1b55cfca | 426 | next = mem2->next; |
DieterGraef | 0:d26c1b55cfca | 427 | /* create new struct mem which is moved directly after the shrunk mem */ |
DieterGraef | 0:d26c1b55cfca | 428 | ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; |
DieterGraef | 0:d26c1b55cfca | 429 | if (lfree == mem2) { |
DieterGraef | 0:d26c1b55cfca | 430 | lfree = (struct mem *)(void *)&ram[ptr2]; |
DieterGraef | 0:d26c1b55cfca | 431 | } |
DieterGraef | 0:d26c1b55cfca | 432 | mem2 = (struct mem *)(void *)&ram[ptr2]; |
DieterGraef | 0:d26c1b55cfca | 433 | mem2->used = 0; |
DieterGraef | 0:d26c1b55cfca | 434 | /* restore the next pointer */ |
DieterGraef | 0:d26c1b55cfca | 435 | mem2->next = next; |
DieterGraef | 0:d26c1b55cfca | 436 | /* link it back to mem */ |
DieterGraef | 0:d26c1b55cfca | 437 | mem2->prev = ptr; |
DieterGraef | 0:d26c1b55cfca | 438 | /* link mem to it */ |
DieterGraef | 0:d26c1b55cfca | 439 | mem->next = ptr2; |
DieterGraef | 0:d26c1b55cfca | 440 | /* last thing to restore linked list: as we have moved mem2, |
DieterGraef | 0:d26c1b55cfca | 441 | * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not |
DieterGraef | 0:d26c1b55cfca | 442 | * the end of the heap */ |
DieterGraef | 0:d26c1b55cfca | 443 | if (mem2->next != MEM_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 444 | ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; |
DieterGraef | 0:d26c1b55cfca | 445 | } |
DieterGraef | 0:d26c1b55cfca | 446 | MEM_STATS_DEC_USED(used, (size - newsize)); |
DieterGraef | 0:d26c1b55cfca | 447 | /* no need to plug holes, we've already done that */ |
DieterGraef | 0:d26c1b55cfca | 448 | } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { |
DieterGraef | 0:d26c1b55cfca | 449 | /* Next struct is used but there's room for another struct mem with |
DieterGraef | 0:d26c1b55cfca | 450 | * at least MIN_SIZE_ALIGNED of data. |
DieterGraef | 0:d26c1b55cfca | 451 | * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem |
DieterGraef | 0:d26c1b55cfca | 452 | * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). |
DieterGraef | 0:d26c1b55cfca | 453 | * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty |
DieterGraef | 0:d26c1b55cfca | 454 | * region that couldn't hold data, but when mem->next gets freed, |
DieterGraef | 0:d26c1b55cfca | 455 | * the 2 regions would be combined, resulting in more free memory */ |
DieterGraef | 0:d26c1b55cfca | 456 | ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; |
DieterGraef | 0:d26c1b55cfca | 457 | mem2 = (struct mem *)(void *)&ram[ptr2]; |
DieterGraef | 0:d26c1b55cfca | 458 | if (mem2 < lfree) { |
DieterGraef | 0:d26c1b55cfca | 459 | lfree = mem2; |
DieterGraef | 0:d26c1b55cfca | 460 | } |
DieterGraef | 0:d26c1b55cfca | 461 | mem2->used = 0; |
DieterGraef | 0:d26c1b55cfca | 462 | mem2->next = mem->next; |
DieterGraef | 0:d26c1b55cfca | 463 | mem2->prev = ptr; |
DieterGraef | 0:d26c1b55cfca | 464 | mem->next = ptr2; |
DieterGraef | 0:d26c1b55cfca | 465 | if (mem2->next != MEM_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 466 | ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; |
DieterGraef | 0:d26c1b55cfca | 467 | } |
DieterGraef | 0:d26c1b55cfca | 468 | MEM_STATS_DEC_USED(used, (size - newsize)); |
DieterGraef | 0:d26c1b55cfca | 469 | /* the original mem->next is used, so no need to plug holes! */ |
DieterGraef | 0:d26c1b55cfca | 470 | } |
DieterGraef | 0:d26c1b55cfca | 471 | /* else { |
DieterGraef | 0:d26c1b55cfca | 472 | next struct mem is used but size between mem and mem2 is not big enough |
DieterGraef | 0:d26c1b55cfca | 473 | to create another struct mem |
DieterGraef | 0:d26c1b55cfca | 474 | -> don't do anything. |
DieterGraef | 0:d26c1b55cfca | 475 | -> the remaining space stays unused since it is too small |
DieterGraef | 0:d26c1b55cfca | 476 | } */ |
DieterGraef | 0:d26c1b55cfca | 477 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 478 | mem_free_count = 1; |
DieterGraef | 0:d26c1b55cfca | 479 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 480 | LWIP_MEM_FREE_UNPROTECT(); |
DieterGraef | 0:d26c1b55cfca | 481 | return rmem; |
DieterGraef | 0:d26c1b55cfca | 482 | } |
DieterGraef | 0:d26c1b55cfca | 483 | |
DieterGraef | 0:d26c1b55cfca | 484 | /** |
DieterGraef | 0:d26c1b55cfca | 485 | * Adam's mem_malloc() plus solution for bug #17922 |
DieterGraef | 0:d26c1b55cfca | 486 | * Allocate a block of memory with a minimum of 'size' bytes. |
DieterGraef | 0:d26c1b55cfca | 487 | * |
DieterGraef | 0:d26c1b55cfca | 488 | * @param size is the minimum size of the requested block in bytes. |
DieterGraef | 0:d26c1b55cfca | 489 | * @return pointer to allocated memory or NULL if no free memory was found. |
DieterGraef | 0:d26c1b55cfca | 490 | * |
DieterGraef | 0:d26c1b55cfca | 491 | * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). |
DieterGraef | 0:d26c1b55cfca | 492 | */ |
DieterGraef | 0:d26c1b55cfca | 493 | void * |
DieterGraef | 0:d26c1b55cfca | 494 | mem_malloc(mem_size_t size) |
DieterGraef | 0:d26c1b55cfca | 495 | { |
DieterGraef | 0:d26c1b55cfca | 496 | mem_size_t ptr, ptr2; |
DieterGraef | 0:d26c1b55cfca | 497 | struct mem *mem, *mem2; |
DieterGraef | 0:d26c1b55cfca | 498 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 499 | u8_t local_mem_free_count = 0; |
DieterGraef | 0:d26c1b55cfca | 500 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 501 | LWIP_MEM_ALLOC_DECL_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 502 | |
DieterGraef | 0:d26c1b55cfca | 503 | if (size == 0) { |
DieterGraef | 0:d26c1b55cfca | 504 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 505 | } |
DieterGraef | 0:d26c1b55cfca | 506 | |
DieterGraef | 0:d26c1b55cfca | 507 | /* Expand the size of the allocated memory region so that we can |
DieterGraef | 0:d26c1b55cfca | 508 | adjust for alignment. */ |
DieterGraef | 0:d26c1b55cfca | 509 | size = LWIP_MEM_ALIGN_SIZE(size); |
DieterGraef | 0:d26c1b55cfca | 510 | |
DieterGraef | 0:d26c1b55cfca | 511 | if(size < MIN_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 512 | /* every data block must be at least MIN_SIZE_ALIGNED long */ |
DieterGraef | 0:d26c1b55cfca | 513 | size = MIN_SIZE_ALIGNED; |
DieterGraef | 0:d26c1b55cfca | 514 | } |
DieterGraef | 0:d26c1b55cfca | 515 | |
DieterGraef | 0:d26c1b55cfca | 516 | if (size > MEM_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 517 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 518 | } |
DieterGraef | 0:d26c1b55cfca | 519 | |
DieterGraef | 0:d26c1b55cfca | 520 | /* protect the heap from concurrent access */ |
DieterGraef | 0:d26c1b55cfca | 521 | sys_mutex_lock(&mem_mutex); |
DieterGraef | 0:d26c1b55cfca | 522 | LWIP_MEM_ALLOC_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 523 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 524 | /* run as long as a mem_free disturbed mem_malloc or mem_trim */ |
DieterGraef | 0:d26c1b55cfca | 525 | do { |
DieterGraef | 0:d26c1b55cfca | 526 | local_mem_free_count = 0; |
DieterGraef | 0:d26c1b55cfca | 527 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 528 | |
DieterGraef | 0:d26c1b55cfca | 529 | /* Scan through the heap searching for a free block that is big enough, |
DieterGraef | 0:d26c1b55cfca | 530 | * beginning with the lowest free block. |
DieterGraef | 0:d26c1b55cfca | 531 | */ |
DieterGraef | 0:d26c1b55cfca | 532 | for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size; |
DieterGraef | 0:d26c1b55cfca | 533 | ptr = ((struct mem *)(void *)&ram[ptr])->next) { |
DieterGraef | 0:d26c1b55cfca | 534 | mem = (struct mem *)(void *)&ram[ptr]; |
DieterGraef | 0:d26c1b55cfca | 535 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 536 | mem_free_count = 0; |
DieterGraef | 0:d26c1b55cfca | 537 | LWIP_MEM_ALLOC_UNPROTECT(); |
DieterGraef | 0:d26c1b55cfca | 538 | /* allow mem_free or mem_trim to run */ |
DieterGraef | 0:d26c1b55cfca | 539 | LWIP_MEM_ALLOC_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 540 | if (mem_free_count != 0) { |
DieterGraef | 0:d26c1b55cfca | 541 | /* If mem_free or mem_trim have run, we have to restart since they |
DieterGraef | 0:d26c1b55cfca | 542 | could have altered our current struct mem. */ |
DieterGraef | 0:d26c1b55cfca | 543 | local_mem_free_count = 1; |
DieterGraef | 0:d26c1b55cfca | 544 | break; |
DieterGraef | 0:d26c1b55cfca | 545 | } |
DieterGraef | 0:d26c1b55cfca | 546 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 547 | |
DieterGraef | 0:d26c1b55cfca | 548 | if ((!mem->used) && |
DieterGraef | 0:d26c1b55cfca | 549 | (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { |
DieterGraef | 0:d26c1b55cfca | 550 | /* mem is not used and at least perfect fit is possible: |
DieterGraef | 0:d26c1b55cfca | 551 | * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ |
DieterGraef | 0:d26c1b55cfca | 552 | |
DieterGraef | 0:d26c1b55cfca | 553 | if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { |
DieterGraef | 0:d26c1b55cfca | 554 | /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing |
DieterGraef | 0:d26c1b55cfca | 555 | * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') |
DieterGraef | 0:d26c1b55cfca | 556 | * -> split large block, create empty remainder, |
DieterGraef | 0:d26c1b55cfca | 557 | * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if |
DieterGraef | 0:d26c1b55cfca | 558 | * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, |
DieterGraef | 0:d26c1b55cfca | 559 | * struct mem would fit in but no data between mem2 and mem2->next |
DieterGraef | 0:d26c1b55cfca | 560 | * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty |
DieterGraef | 0:d26c1b55cfca | 561 | * region that couldn't hold data, but when mem->next gets freed, |
DieterGraef | 0:d26c1b55cfca | 562 | * the 2 regions would be combined, resulting in more free memory |
DieterGraef | 0:d26c1b55cfca | 563 | */ |
DieterGraef | 0:d26c1b55cfca | 564 | ptr2 = ptr + SIZEOF_STRUCT_MEM + size; |
DieterGraef | 0:d26c1b55cfca | 565 | /* create mem2 struct */ |
DieterGraef | 0:d26c1b55cfca | 566 | mem2 = (struct mem *)(void *)&ram[ptr2]; |
DieterGraef | 0:d26c1b55cfca | 567 | mem2->used = 0; |
DieterGraef | 0:d26c1b55cfca | 568 | mem2->next = mem->next; |
DieterGraef | 0:d26c1b55cfca | 569 | mem2->prev = ptr; |
DieterGraef | 0:d26c1b55cfca | 570 | /* and insert it between mem and mem->next */ |
DieterGraef | 0:d26c1b55cfca | 571 | mem->next = ptr2; |
DieterGraef | 0:d26c1b55cfca | 572 | mem->used = 1; |
DieterGraef | 0:d26c1b55cfca | 573 | |
DieterGraef | 0:d26c1b55cfca | 574 | if (mem2->next != MEM_SIZE_ALIGNED) { |
DieterGraef | 0:d26c1b55cfca | 575 | ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; |
DieterGraef | 0:d26c1b55cfca | 576 | } |
DieterGraef | 0:d26c1b55cfca | 577 | MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); |
DieterGraef | 0:d26c1b55cfca | 578 | } else { |
DieterGraef | 0:d26c1b55cfca | 579 | /* (a mem2 struct does not fit into the user data space of mem and mem->next will always |
DieterGraef | 0:d26c1b55cfca | 580 | * be used at this point: if not we have 2 unused structs in a row, plug_holes should have |
DieterGraef | 0:d26c1b55cfca | 581 | * taken care of this). |
DieterGraef | 0:d26c1b55cfca | 582 | * -> near fit or exact fit: do not split, no mem2 creation |
DieterGraef | 0:d26c1b55cfca | 583 | * also can't move mem->next directly behind mem, since mem->next |
DieterGraef | 0:d26c1b55cfca | 584 | * will always be used at this point! |
DieterGraef | 0:d26c1b55cfca | 585 | */ |
DieterGraef | 0:d26c1b55cfca | 586 | mem->used = 1; |
DieterGraef | 0:d26c1b55cfca | 587 | MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram)); |
DieterGraef | 0:d26c1b55cfca | 588 | } |
DieterGraef | 0:d26c1b55cfca | 589 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 590 | mem_malloc_adjust_lfree: |
DieterGraef | 0:d26c1b55cfca | 591 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 592 | if (mem == lfree) { |
DieterGraef | 0:d26c1b55cfca | 593 | struct mem *cur = lfree; |
DieterGraef | 0:d26c1b55cfca | 594 | /* Find next free block after mem and update lowest free pointer */ |
DieterGraef | 0:d26c1b55cfca | 595 | while (cur->used && cur != ram_end) { |
DieterGraef | 0:d26c1b55cfca | 596 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 597 | mem_free_count = 0; |
DieterGraef | 0:d26c1b55cfca | 598 | LWIP_MEM_ALLOC_UNPROTECT(); |
DieterGraef | 0:d26c1b55cfca | 599 | /* prevent high interrupt latency... */ |
DieterGraef | 0:d26c1b55cfca | 600 | LWIP_MEM_ALLOC_PROTECT(); |
DieterGraef | 0:d26c1b55cfca | 601 | if (mem_free_count != 0) { |
DieterGraef | 0:d26c1b55cfca | 602 | /* If mem_free or mem_trim have run, we have to restart since they |
DieterGraef | 0:d26c1b55cfca | 603 | could have altered our current struct mem or lfree. */ |
DieterGraef | 0:d26c1b55cfca | 604 | goto mem_malloc_adjust_lfree; |
DieterGraef | 0:d26c1b55cfca | 605 | } |
DieterGraef | 0:d26c1b55cfca | 606 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 607 | cur = (struct mem *)(void *)&ram[cur->next]; |
DieterGraef | 0:d26c1b55cfca | 608 | } |
DieterGraef | 0:d26c1b55cfca | 609 | lfree = cur; |
DieterGraef | 0:d26c1b55cfca | 610 | LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); |
DieterGraef | 0:d26c1b55cfca | 611 | } |
DieterGraef | 0:d26c1b55cfca | 612 | LWIP_MEM_ALLOC_UNPROTECT(); |
DieterGraef | 0:d26c1b55cfca | 613 | sys_mutex_unlock(&mem_mutex); |
DieterGraef | 0:d26c1b55cfca | 614 | LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", |
DieterGraef | 0:d26c1b55cfca | 615 | (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); |
DieterGraef | 0:d26c1b55cfca | 616 | LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", |
DieterGraef | 0:d26c1b55cfca | 617 | ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); |
DieterGraef | 0:d26c1b55cfca | 618 | LWIP_ASSERT("mem_malloc: sanity check alignment", |
DieterGraef | 0:d26c1b55cfca | 619 | (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0); |
DieterGraef | 0:d26c1b55cfca | 620 | |
DieterGraef | 0:d26c1b55cfca | 621 | return (u8_t *)mem + SIZEOF_STRUCT_MEM; |
DieterGraef | 0:d26c1b55cfca | 622 | } |
DieterGraef | 0:d26c1b55cfca | 623 | } |
DieterGraef | 0:d26c1b55cfca | 624 | #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT |
DieterGraef | 0:d26c1b55cfca | 625 | /* if we got interrupted by a mem_free, try again */ |
DieterGraef | 0:d26c1b55cfca | 626 | } while(local_mem_free_count != 0); |
DieterGraef | 0:d26c1b55cfca | 627 | #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ |
DieterGraef | 0:d26c1b55cfca | 628 | LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); |
DieterGraef | 0:d26c1b55cfca | 629 | MEM_STATS_INC(err); |
DieterGraef | 0:d26c1b55cfca | 630 | LWIP_MEM_ALLOC_UNPROTECT(); |
DieterGraef | 0:d26c1b55cfca | 631 | sys_mutex_unlock(&mem_mutex); |
DieterGraef | 0:d26c1b55cfca | 632 | return NULL; |
DieterGraef | 0:d26c1b55cfca | 633 | } |
DieterGraef | 0:d26c1b55cfca | 634 | |
DieterGraef | 0:d26c1b55cfca | 635 | #endif /* MEM_USE_POOLS */ |
DieterGraef | 0:d26c1b55cfca | 636 | /** |
DieterGraef | 0:d26c1b55cfca | 637 | * Contiguously allocates enough space for count objects that are size bytes |
DieterGraef | 0:d26c1b55cfca | 638 | * of memory each and returns a pointer to the allocated memory. |
DieterGraef | 0:d26c1b55cfca | 639 | * |
DieterGraef | 0:d26c1b55cfca | 640 | * The allocated memory is filled with bytes of value zero. |
DieterGraef | 0:d26c1b55cfca | 641 | * |
DieterGraef | 0:d26c1b55cfca | 642 | * @param count number of objects to allocate |
DieterGraef | 0:d26c1b55cfca | 643 | * @param size size of the objects to allocate |
DieterGraef | 0:d26c1b55cfca | 644 | * @return pointer to allocated memory / NULL pointer if there is an error |
DieterGraef | 0:d26c1b55cfca | 645 | */ |
DieterGraef | 0:d26c1b55cfca | 646 | void *mem_calloc(mem_size_t count, mem_size_t size) |
DieterGraef | 0:d26c1b55cfca | 647 | { |
DieterGraef | 0:d26c1b55cfca | 648 | void *p; |
DieterGraef | 0:d26c1b55cfca | 649 | |
DieterGraef | 0:d26c1b55cfca | 650 | /* allocate 'count' objects of size 'size' */ |
DieterGraef | 0:d26c1b55cfca | 651 | p = mem_malloc(count * size); |
DieterGraef | 0:d26c1b55cfca | 652 | if (p) { |
DieterGraef | 0:d26c1b55cfca | 653 | /* zero the memory */ |
DieterGraef | 0:d26c1b55cfca | 654 | memset(p, 0, count * size); |
DieterGraef | 0:d26c1b55cfca | 655 | } |
DieterGraef | 0:d26c1b55cfca | 656 | return p; |
DieterGraef | 0:d26c1b55cfca | 657 | } |
DieterGraef | 0:d26c1b55cfca | 658 | |
DieterGraef | 0:d26c1b55cfca | 659 | #endif /* !MEM_LIBC_MALLOC */ |
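
The file header (lines 7-21 of the source above) describes how mem_malloc() can be redirected to fixed-size memp pools instead of this heap. As a sketch only, a matching lwipopts.h / lwippools.h pair could look like the following; the pool counts and sizes are the three examples from the header comment, and the custom-pools switch is spelled MEM_USE_CUSTOM_POOLS there but MEMP_USE_CUSTOM_POOLS in the opt.h of recent lwIP trees, so verify the name against the opt.h shipped with this library:

```c
/* lwipopts.h -- sketch only; adjust to your build */
#define MEM_LIBC_MALLOC        0  /* keep lwIP's own allocator instead of libc malloc() */
#define MEM_USE_POOLS          1  /* serve mem_malloc()/mem_free() from memp pools */
#define MEMP_USE_CUSTOM_POOLS  1  /* pull the pool list from "lwippools.h"
                                     (the file header writes MEM_USE_CUSTOM_POOLS;
                                     check opt.h of this tree for the exact name) */

/* lwippools.h -- the three example pools from the file header:
 * 20 x 256 B, 10 x 512 B and 5 x 1512 B elements. mem_malloc() then picks the
 * smallest pool whose element size fits the request plus the
 * struct memp_malloc_helper bookkeeping header. */
LWIP_MALLOC_MEMPOOL_START
LWIP_MALLOC_MEMPOOL(20, 256)
LWIP_MALLOC_MEMPOOL(10, 512)
LWIP_MALLOC_MEMPOOL(5, 1512)
LWIP_MALLOC_MEMPOOL_END
```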
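
With MEM_USE_POOLS left at its default of 0, the heap code in this file backs mem_malloc(), mem_calloc(), mem_trim() and mem_free(). A minimal usage sketch under those assumptions (the helper names and the 8-sample buffer are illustrative, not part of the library; lwip_init() is expected to have run already, since it calls mem_init()):

```c
#include "lwip/mem.h"

/* Hypothetical helper: grab a zero-initialised buffer of eight u16_t samples
 * from the lwIP heap. mem_calloc() is mem_malloc() plus memset(0); a NULL
 * return means the MEM_SIZE heap is exhausted (and the 'err' mem stat was
 * incremented). The returned pointer is MEM_ALIGNMENT-aligned. */
static u16_t *alloc_sample_buffer(void)
{
  return (u16_t *)mem_calloc(8, sizeof(u16_t));
}

/* Hypothetical helper: hand the buffer back; mem_free() marks the block
 * unused and plug_holes() coalesces it with free neighbours. */
static void release_sample_buffer(u16_t *samples)
{
  if (samples != NULL) {
    mem_free(samples);
  }
}
```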