Maxim mbed development library
events/equeue/equeue.c@0:0e018d759a2a, 2016-11-08 (annotated)
- Committer: switches
- Date: Tue Nov 08 18:27:11 2016 +0000
- Revision: 0:0e018d759a2a
Initial commit
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
switches | 0:0e018d759a2a | 1 | /* |
switches | 0:0e018d759a2a | 2 | * Flexible event queue for dispatching events |
switches | 0:0e018d759a2a | 3 | * |
switches | 0:0e018d759a2a | 4 | * Copyright (c) 2016 Christopher Haster |
switches | 0:0e018d759a2a | 5 | * |
switches | 0:0e018d759a2a | 6 | * Licensed under the Apache License, Version 2.0 (the "License"); |
switches | 0:0e018d759a2a | 7 | * you may not use this file except in compliance with the License. |
switches | 0:0e018d759a2a | 8 | * You may obtain a copy of the License at |
switches | 0:0e018d759a2a | 9 | * |
switches | 0:0e018d759a2a | 10 | * http://www.apache.org/licenses/LICENSE-2.0 |
switches | 0:0e018d759a2a | 11 | * |
switches | 0:0e018d759a2a | 12 | * Unless required by applicable law or agreed to in writing, software |
switches | 0:0e018d759a2a | 13 | * distributed under the License is distributed on an "AS IS" BASIS, |
switches | 0:0e018d759a2a | 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
switches | 0:0e018d759a2a | 15 | * See the License for the specific language governing permissions and |
switches | 0:0e018d759a2a | 16 | * limitations under the License. |
switches | 0:0e018d759a2a | 17 | */ |
switches | 0:0e018d759a2a | 18 | #include "equeue/equeue.h" |
switches | 0:0e018d759a2a | 19 | |
switches | 0:0e018d759a2a | 20 | #include <stdlib.h> |
switches | 0:0e018d759a2a | 21 | #include <string.h> |
switches | 0:0e018d759a2a | 22 | |
switches | 0:0e018d759a2a | 23 | |
switches | 0:0e018d759a2a | 24 | // calculate the relative-difference between absolute times while |
switches | 0:0e018d759a2a | 25 | // correctly handling overflow conditions |
switches | 0:0e018d759a2a | 26 | static inline int equeue_tickdiff(unsigned a, unsigned b) { |
switches | 0:0e018d759a2a | 27 | return (int)(unsigned)(a - b); |
switches | 0:0e018d759a2a | 28 | } |
switches | 0:0e018d759a2a | 29 | |
switches | 0:0e018d759a2a | 30 | // calculate the relative-difference between absolute times, but |
switches | 0:0e018d759a2a | 31 | * also clamp to zero, resulting in only non-negative values. |
switches | 0:0e018d759a2a | 32 | static inline int equeue_clampdiff(unsigned a, unsigned b) { |
switches | 0:0e018d759a2a | 33 | int diff = equeue_tickdiff(a, b); |
switches | 0:0e018d759a2a | 34 | return ~(diff >> (8*sizeof(int)-1)) & diff; |
switches | 0:0e018d759a2a | 35 | } |
switches | 0:0e018d759a2a | 36 | |
switches | 0:0e018d759a2a | 37 | // Increment the unique id in an event, hiding the event from cancel |
switches | 0:0e018d759a2a | 38 | static inline void equeue_incid(equeue_t *q, struct equeue_event *e) { |
switches | 0:0e018d759a2a | 39 | e->id += 1; |
switches | 0:0e018d759a2a | 40 | if (!(e->id << q->npw2)) { |
switches | 0:0e018d759a2a | 41 | e->id = 1; |
switches | 0:0e018d759a2a | 42 | } |
switches | 0:0e018d759a2a | 43 | } |
switches | 0:0e018d759a2a | 44 | |
switches | 0:0e018d759a2a | 45 | |
switches | 0:0e018d759a2a | 46 | // equeue lifetime management |
switches | 0:0e018d759a2a | 47 | int equeue_create(equeue_t *q, size_t size) { |
switches | 0:0e018d759a2a | 48 | // dynamically allocate the specified buffer |
switches | 0:0e018d759a2a | 49 | void *buffer = malloc(size); |
switches | 0:0e018d759a2a | 50 | if (!buffer) { |
switches | 0:0e018d759a2a | 51 | return -1; |
switches | 0:0e018d759a2a | 52 | } |
switches | 0:0e018d759a2a | 53 | |
switches | 0:0e018d759a2a | 54 | int err = equeue_create_inplace(q, size, buffer); |
switches | 0:0e018d759a2a | 55 | q->allocated = buffer; |
switches | 0:0e018d759a2a | 56 | return err; |
switches | 0:0e018d759a2a | 57 | } |
switches | 0:0e018d759a2a | 58 | |
switches | 0:0e018d759a2a | 59 | int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) { |
switches | 0:0e018d759a2a | 60 | // setup queue around provided buffer |
switches | 0:0e018d759a2a | 61 | q->buffer = buffer; |
switches | 0:0e018d759a2a | 62 | q->allocated = 0; |
switches | 0:0e018d759a2a | 63 | |
switches | 0:0e018d759a2a | 64 | q->npw2 = 0; |
switches | 0:0e018d759a2a | 65 | for (unsigned s = size; s; s >>= 1) { |
switches | 0:0e018d759a2a | 66 | q->npw2++; |
switches | 0:0e018d759a2a | 67 | } |
switches | 0:0e018d759a2a | 68 | |
switches | 0:0e018d759a2a | 69 | q->chunks = 0; |
switches | 0:0e018d759a2a | 70 | q->slab.size = size; |
switches | 0:0e018d759a2a | 71 | q->slab.data = buffer; |
switches | 0:0e018d759a2a | 72 | |
switches | 0:0e018d759a2a | 73 | q->queue = 0; |
switches | 0:0e018d759a2a | 74 | q->tick = equeue_tick(); |
switches | 0:0e018d759a2a | 75 | q->generation = 0; |
switches | 0:0e018d759a2a | 76 | q->breaks = 0; |
switches | 0:0e018d759a2a | 77 | |
switches | 0:0e018d759a2a | 78 | q->background.active = false; |
switches | 0:0e018d759a2a | 79 | q->background.update = 0; |
switches | 0:0e018d759a2a | 80 | q->background.timer = 0; |
switches | 0:0e018d759a2a | 81 | |
switches | 0:0e018d759a2a | 82 | // initialize platform resources |
switches | 0:0e018d759a2a | 83 | int err; |
switches | 0:0e018d759a2a | 84 | err = equeue_sema_create(&q->eventsema); |
switches | 0:0e018d759a2a | 85 | if (err < 0) { |
switches | 0:0e018d759a2a | 86 | return err; |
switches | 0:0e018d759a2a | 87 | } |
switches | 0:0e018d759a2a | 88 | |
switches | 0:0e018d759a2a | 89 | err = equeue_mutex_create(&q->queuelock); |
switches | 0:0e018d759a2a | 90 | if (err < 0) { |
switches | 0:0e018d759a2a | 91 | return err; |
switches | 0:0e018d759a2a | 92 | } |
switches | 0:0e018d759a2a | 93 | |
switches | 0:0e018d759a2a | 94 | err = equeue_mutex_create(&q->memlock); |
switches | 0:0e018d759a2a | 95 | if (err < 0) { |
switches | 0:0e018d759a2a | 96 | return err; |
switches | 0:0e018d759a2a | 97 | } |
switches | 0:0e018d759a2a | 98 | |
switches | 0:0e018d759a2a | 99 | return 0; |
switches | 0:0e018d759a2a | 100 | } |
switches | 0:0e018d759a2a | 101 | |
switches | 0:0e018d759a2a | 102 | void equeue_destroy(equeue_t *q) { |
switches | 0:0e018d759a2a | 103 | // call destructors on pending events |
switches | 0:0e018d759a2a | 104 | for (struct equeue_event *es = q->queue; es; es = es->next) { |
switches | 0:0e018d759a2a | 105 | for (struct equeue_event *e = es; e; e = e->sibling) { |
switches | 0:0e018d759a2a | 106 | if (e->dtor) { |
switches | 0:0e018d759a2a | 107 | e->dtor(e + 1); |
switches | 0:0e018d759a2a | 108 | } |
switches | 0:0e018d759a2a | 109 | } |
switches | 0:0e018d759a2a | 110 | } |
switches | 0:0e018d759a2a | 111 | |
switches | 0:0e018d759a2a | 112 | // notify background timer |
switches | 0:0e018d759a2a | 113 | if (q->background.update) { |
switches | 0:0e018d759a2a | 114 | q->background.update(q->background.timer, -1); |
switches | 0:0e018d759a2a | 115 | } |
switches | 0:0e018d759a2a | 116 | |
switches | 0:0e018d759a2a | 117 | // clean up platform resources + memory |
switches | 0:0e018d759a2a | 118 | equeue_mutex_destroy(&q->memlock); |
switches | 0:0e018d759a2a | 119 | equeue_mutex_destroy(&q->queuelock); |
switches | 0:0e018d759a2a | 120 | equeue_sema_destroy(&q->eventsema); |
switches | 0:0e018d759a2a | 121 | free(q->allocated); |
switches | 0:0e018d759a2a | 122 | } |
switches | 0:0e018d759a2a | 123 | |
switches | 0:0e018d759a2a | 124 | |
switches | 0:0e018d759a2a | 125 | // equeue chunk allocation functions |
switches | 0:0e018d759a2a | 126 | static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) { |
switches | 0:0e018d759a2a | 127 | // add event overhead |
switches | 0:0e018d759a2a | 128 | size += sizeof(struct equeue_event); |
switches | 0:0e018d759a2a | 129 | size = (size + sizeof(void*)-1) & ~(sizeof(void*)-1); |
switches | 0:0e018d759a2a | 130 | |
switches | 0:0e018d759a2a | 131 | equeue_mutex_lock(&q->memlock); |
switches | 0:0e018d759a2a | 132 | |
switches | 0:0e018d759a2a | 133 | // check if a good chunk is available |
switches | 0:0e018d759a2a | 134 | for (struct equeue_event **p = &q->chunks; *p; p = &(*p)->next) { |
switches | 0:0e018d759a2a | 135 | if ((*p)->size >= size) { |
switches | 0:0e018d759a2a | 136 | struct equeue_event *e = *p; |
switches | 0:0e018d759a2a | 137 | if (e->sibling) { |
switches | 0:0e018d759a2a | 138 | *p = e->sibling; |
switches | 0:0e018d759a2a | 139 | (*p)->next = e->next; |
switches | 0:0e018d759a2a | 140 | } else { |
switches | 0:0e018d759a2a | 141 | *p = e->next; |
switches | 0:0e018d759a2a | 142 | } |
switches | 0:0e018d759a2a | 143 | |
switches | 0:0e018d759a2a | 144 | equeue_mutex_unlock(&q->memlock); |
switches | 0:0e018d759a2a | 145 | return e; |
switches | 0:0e018d759a2a | 146 | } |
switches | 0:0e018d759a2a | 147 | } |
switches | 0:0e018d759a2a | 148 | |
switches | 0:0e018d759a2a | 149 | // otherwise allocate a new chunk out of the slab |
switches | 0:0e018d759a2a | 150 | if (q->slab.size >= size) { |
switches | 0:0e018d759a2a | 151 | struct equeue_event *e = (struct equeue_event *)q->slab.data; |
switches | 0:0e018d759a2a | 152 | q->slab.data += size; |
switches | 0:0e018d759a2a | 153 | q->slab.size -= size; |
switches | 0:0e018d759a2a | 154 | e->size = size; |
switches | 0:0e018d759a2a | 155 | e->id = 1; |
switches | 0:0e018d759a2a | 156 | |
switches | 0:0e018d759a2a | 157 | equeue_mutex_unlock(&q->memlock); |
switches | 0:0e018d759a2a | 158 | return e; |
switches | 0:0e018d759a2a | 159 | } |
switches | 0:0e018d759a2a | 160 | |
switches | 0:0e018d759a2a | 161 | equeue_mutex_unlock(&q->memlock); |
switches | 0:0e018d759a2a | 162 | return 0; |
switches | 0:0e018d759a2a | 163 | } |
switches | 0:0e018d759a2a | 164 | |
switches | 0:0e018d759a2a | 165 | static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) { |
switches | 0:0e018d759a2a | 166 | equeue_mutex_lock(&q->memlock); |
switches | 0:0e018d759a2a | 167 | |
switches | 0:0e018d759a2a | 168 | // stick chunk into list of chunks |
switches | 0:0e018d759a2a | 169 | struct equeue_event **p = &q->chunks; |
switches | 0:0e018d759a2a | 170 | while (*p && (*p)->size < e->size) { |
switches | 0:0e018d759a2a | 171 | p = &(*p)->next; |
switches | 0:0e018d759a2a | 172 | } |
switches | 0:0e018d759a2a | 173 | |
switches | 0:0e018d759a2a | 174 | if (*p && (*p)->size == e->size) { |
switches | 0:0e018d759a2a | 175 | e->sibling = *p; |
switches | 0:0e018d759a2a | 176 | e->next = (*p)->next; |
switches | 0:0e018d759a2a | 177 | } else { |
switches | 0:0e018d759a2a | 178 | e->sibling = 0; |
switches | 0:0e018d759a2a | 179 | e->next = *p; |
switches | 0:0e018d759a2a | 180 | } |
switches | 0:0e018d759a2a | 181 | *p = e; |
switches | 0:0e018d759a2a | 182 | |
switches | 0:0e018d759a2a | 183 | equeue_mutex_unlock(&q->memlock); |
switches | 0:0e018d759a2a | 184 | } |
switches | 0:0e018d759a2a | 185 | |
switches | 0:0e018d759a2a | 186 | void *equeue_alloc(equeue_t *q, size_t size) { |
switches | 0:0e018d759a2a | 187 | struct equeue_event *e = equeue_mem_alloc(q, size); |
switches | 0:0e018d759a2a | 188 | if (!e) { |
switches | 0:0e018d759a2a | 189 | return 0; |
switches | 0:0e018d759a2a | 190 | } |
switches | 0:0e018d759a2a | 191 | |
switches | 0:0e018d759a2a | 192 | e->target = 0; |
switches | 0:0e018d759a2a | 193 | e->period = -1; |
switches | 0:0e018d759a2a | 194 | e->dtor = 0; |
switches | 0:0e018d759a2a | 195 | |
switches | 0:0e018d759a2a | 196 | return e + 1; |
switches | 0:0e018d759a2a | 197 | } |
switches | 0:0e018d759a2a | 198 | |
switches | 0:0e018d759a2a | 199 | void equeue_dealloc(equeue_t *q, void *p) { |
switches | 0:0e018d759a2a | 200 | struct equeue_event *e = (struct equeue_event*)p - 1; |
switches | 0:0e018d759a2a | 201 | |
switches | 0:0e018d759a2a | 202 | if (e->dtor) { |
switches | 0:0e018d759a2a | 203 | e->dtor(e+1); |
switches | 0:0e018d759a2a | 204 | } |
switches | 0:0e018d759a2a | 205 | |
switches | 0:0e018d759a2a | 206 | equeue_mem_dealloc(q, e); |
switches | 0:0e018d759a2a | 207 | } |
switches | 0:0e018d759a2a | 208 | |
switches | 0:0e018d759a2a | 209 | |
switches | 0:0e018d759a2a | 210 | // equeue scheduling functions |
switches | 0:0e018d759a2a | 211 | static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) { |
switches | 0:0e018d759a2a | 212 | // setup event and hash local id with buffer offset for unique id |
switches | 0:0e018d759a2a | 213 | int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer); |
switches | 0:0e018d759a2a | 214 | e->target = tick + equeue_clampdiff(e->target, tick); |
switches | 0:0e018d759a2a | 215 | e->generation = q->generation; |
switches | 0:0e018d759a2a | 216 | |
switches | 0:0e018d759a2a | 217 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 218 | |
switches | 0:0e018d759a2a | 219 | // find the event slot |
switches | 0:0e018d759a2a | 220 | struct equeue_event **p = &q->queue; |
switches | 0:0e018d759a2a | 221 | while (*p && equeue_tickdiff((*p)->target, e->target) < 0) { |
switches | 0:0e018d759a2a | 222 | p = &(*p)->next; |
switches | 0:0e018d759a2a | 223 | } |
switches | 0:0e018d759a2a | 224 | |
switches | 0:0e018d759a2a | 225 | // insert at head in slot |
switches | 0:0e018d759a2a | 226 | if (*p && (*p)->target == e->target) { |
switches | 0:0e018d759a2a | 227 | e->next = (*p)->next; |
switches | 0:0e018d759a2a | 228 | if (e->next) { |
switches | 0:0e018d759a2a | 229 | e->next->ref = &e->next; |
switches | 0:0e018d759a2a | 230 | } |
switches | 0:0e018d759a2a | 231 | |
switches | 0:0e018d759a2a | 232 | e->sibling = *p; |
switches | 0:0e018d759a2a | 233 | e->sibling->ref = &e->sibling; |
switches | 0:0e018d759a2a | 234 | } else { |
switches | 0:0e018d759a2a | 235 | e->next = *p; |
switches | 0:0e018d759a2a | 236 | if (e->next) { |
switches | 0:0e018d759a2a | 237 | e->next->ref = &e->next; |
switches | 0:0e018d759a2a | 238 | } |
switches | 0:0e018d759a2a | 239 | |
switches | 0:0e018d759a2a | 240 | e->sibling = 0; |
switches | 0:0e018d759a2a | 241 | } |
switches | 0:0e018d759a2a | 242 | |
switches | 0:0e018d759a2a | 243 | *p = e; |
switches | 0:0e018d759a2a | 244 | e->ref = p; |
switches | 0:0e018d759a2a | 245 | |
switches | 0:0e018d759a2a | 246 | // notify background timer |
switches | 0:0e018d759a2a | 247 | if ((q->background.update && q->background.active) && |
switches | 0:0e018d759a2a | 248 | (q->queue == e && !e->sibling)) { |
switches | 0:0e018d759a2a | 249 | q->background.update(q->background.timer, |
switches | 0:0e018d759a2a | 250 | equeue_clampdiff(e->target, tick)); |
switches | 0:0e018d759a2a | 251 | } |
switches | 0:0e018d759a2a | 252 | |
switches | 0:0e018d759a2a | 253 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 254 | |
switches | 0:0e018d759a2a | 255 | return id; |
switches | 0:0e018d759a2a | 256 | } |
switches | 0:0e018d759a2a | 257 | |
switches | 0:0e018d759a2a | 258 | static struct equeue_event *equeue_unqueue(equeue_t *q, int id) { |
switches | 0:0e018d759a2a | 259 | // decode event from unique id and check that the local id matches |
switches | 0:0e018d759a2a | 260 | struct equeue_event *e = (struct equeue_event *) |
switches | 0:0e018d759a2a | 261 | &q->buffer[id & ((1 << q->npw2)-1)]; |
switches | 0:0e018d759a2a | 262 | |
switches | 0:0e018d759a2a | 263 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 264 | if (e->id != id >> q->npw2) { |
switches | 0:0e018d759a2a | 265 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 266 | return 0; |
switches | 0:0e018d759a2a | 267 | } |
switches | 0:0e018d759a2a | 268 | |
switches | 0:0e018d759a2a | 269 | // clear the event and check if already in-flight |
switches | 0:0e018d759a2a | 270 | e->cb = 0; |
switches | 0:0e018d759a2a | 271 | e->period = -1; |
switches | 0:0e018d759a2a | 272 | |
switches | 0:0e018d759a2a | 273 | int diff = equeue_tickdiff(e->target, q->tick); |
switches | 0:0e018d759a2a | 274 | if (diff < 0 || (diff == 0 && e->generation != q->generation)) { |
switches | 0:0e018d759a2a | 275 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 276 | return 0; |
switches | 0:0e018d759a2a | 277 | } |
switches | 0:0e018d759a2a | 278 | |
switches | 0:0e018d759a2a | 279 | // disentangle from queue |
switches | 0:0e018d759a2a | 280 | if (e->sibling) { |
switches | 0:0e018d759a2a | 281 | e->sibling->next = e->next; |
switches | 0:0e018d759a2a | 282 | if (e->sibling->next) { |
switches | 0:0e018d759a2a | 283 | e->sibling->next->ref = &e->sibling->next; |
switches | 0:0e018d759a2a | 284 | } |
switches | 0:0e018d759a2a | 285 | |
switches | 0:0e018d759a2a | 286 | *e->ref = e->sibling; |
switches | 0:0e018d759a2a | 287 | e->sibling->ref = e->ref; |
switches | 0:0e018d759a2a | 288 | } else { |
switches | 0:0e018d759a2a | 289 | *e->ref = e->next; |
switches | 0:0e018d759a2a | 290 | if (e->next) { |
switches | 0:0e018d759a2a | 291 | e->next->ref = e->ref; |
switches | 0:0e018d759a2a | 292 | } |
switches | 0:0e018d759a2a | 293 | } |
switches | 0:0e018d759a2a | 294 | |
switches | 0:0e018d759a2a | 295 | equeue_incid(q, e); |
switches | 0:0e018d759a2a | 296 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 297 | |
switches | 0:0e018d759a2a | 298 | return e; |
switches | 0:0e018d759a2a | 299 | } |
switches | 0:0e018d759a2a | 300 | |
switches | 0:0e018d759a2a | 301 | static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) { |
switches | 0:0e018d759a2a | 302 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 303 | |
switches | 0:0e018d759a2a | 304 | // find all expired events and mark a new generation |
switches | 0:0e018d759a2a | 305 | q->generation += 1; |
switches | 0:0e018d759a2a | 306 | if (equeue_tickdiff(q->tick, target) <= 0) { |
switches | 0:0e018d759a2a | 307 | q->tick = target; |
switches | 0:0e018d759a2a | 308 | } |
switches | 0:0e018d759a2a | 309 | |
switches | 0:0e018d759a2a | 310 | struct equeue_event *head = q->queue; |
switches | 0:0e018d759a2a | 311 | struct equeue_event **p = &head; |
switches | 0:0e018d759a2a | 312 | while (*p && equeue_tickdiff((*p)->target, target) <= 0) { |
switches | 0:0e018d759a2a | 313 | p = &(*p)->next; |
switches | 0:0e018d759a2a | 314 | } |
switches | 0:0e018d759a2a | 315 | |
switches | 0:0e018d759a2a | 316 | q->queue = *p; |
switches | 0:0e018d759a2a | 317 | if (q->queue) { |
switches | 0:0e018d759a2a | 318 | q->queue->ref = &q->queue; |
switches | 0:0e018d759a2a | 319 | } |
switches | 0:0e018d759a2a | 320 | |
switches | 0:0e018d759a2a | 321 | *p = 0; |
switches | 0:0e018d759a2a | 322 | |
switches | 0:0e018d759a2a | 323 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 324 | |
switches | 0:0e018d759a2a | 325 | // reverse and flatten each slot to match insertion order |
switches | 0:0e018d759a2a | 326 | struct equeue_event **tail = &head; |
switches | 0:0e018d759a2a | 327 | struct equeue_event *ess = head; |
switches | 0:0e018d759a2a | 328 | while (ess) { |
switches | 0:0e018d759a2a | 329 | struct equeue_event *es = ess; |
switches | 0:0e018d759a2a | 330 | ess = es->next; |
switches | 0:0e018d759a2a | 331 | |
switches | 0:0e018d759a2a | 332 | struct equeue_event *prev = 0; |
switches | 0:0e018d759a2a | 333 | for (struct equeue_event *e = es; e; e = e->sibling) { |
switches | 0:0e018d759a2a | 334 | e->next = prev; |
switches | 0:0e018d759a2a | 335 | prev = e; |
switches | 0:0e018d759a2a | 336 | } |
switches | 0:0e018d759a2a | 337 | |
switches | 0:0e018d759a2a | 338 | *tail = prev; |
switches | 0:0e018d759a2a | 339 | tail = &es->next; |
switches | 0:0e018d759a2a | 340 | } |
switches | 0:0e018d759a2a | 341 | |
switches | 0:0e018d759a2a | 342 | return head; |
switches | 0:0e018d759a2a | 343 | } |
switches | 0:0e018d759a2a | 344 | |
switches | 0:0e018d759a2a | 345 | int equeue_post(equeue_t *q, void (*cb)(void*), void *p) { |
switches | 0:0e018d759a2a | 346 | struct equeue_event *e = (struct equeue_event*)p - 1; |
switches | 0:0e018d759a2a | 347 | unsigned tick = equeue_tick(); |
switches | 0:0e018d759a2a | 348 | e->cb = cb; |
switches | 0:0e018d759a2a | 349 | e->target = tick + e->target; |
switches | 0:0e018d759a2a | 350 | |
switches | 0:0e018d759a2a | 351 | int id = equeue_enqueue(q, e, tick); |
switches | 0:0e018d759a2a | 352 | equeue_sema_signal(&q->eventsema); |
switches | 0:0e018d759a2a | 353 | return id; |
switches | 0:0e018d759a2a | 354 | } |
switches | 0:0e018d759a2a | 355 | |
switches | 0:0e018d759a2a | 356 | void equeue_cancel(equeue_t *q, int id) { |
switches | 0:0e018d759a2a | 357 | if (!id) { |
switches | 0:0e018d759a2a | 358 | return; |
switches | 0:0e018d759a2a | 359 | } |
switches | 0:0e018d759a2a | 360 | |
switches | 0:0e018d759a2a | 361 | struct equeue_event *e = equeue_unqueue(q, id); |
switches | 0:0e018d759a2a | 362 | if (e) { |
switches | 0:0e018d759a2a | 363 | equeue_dealloc(q, e + 1); |
switches | 0:0e018d759a2a | 364 | } |
switches | 0:0e018d759a2a | 365 | } |
switches | 0:0e018d759a2a | 366 | |
switches | 0:0e018d759a2a | 367 | void equeue_break(equeue_t *q) { |
switches | 0:0e018d759a2a | 368 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 369 | q->breaks++; |
switches | 0:0e018d759a2a | 370 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 371 | equeue_sema_signal(&q->eventsema); |
switches | 0:0e018d759a2a | 372 | } |
switches | 0:0e018d759a2a | 373 | |
switches | 0:0e018d759a2a | 374 | void equeue_dispatch(equeue_t *q, int ms) { |
switches | 0:0e018d759a2a | 375 | unsigned tick = equeue_tick(); |
switches | 0:0e018d759a2a | 376 | unsigned timeout = tick + ms; |
switches | 0:0e018d759a2a | 377 | q->background.active = false; |
switches | 0:0e018d759a2a | 378 | |
switches | 0:0e018d759a2a | 379 | while (1) { |
switches | 0:0e018d759a2a | 380 | // collect all the available events and next deadline |
switches | 0:0e018d759a2a | 381 | struct equeue_event *es = equeue_dequeue(q, tick); |
switches | 0:0e018d759a2a | 382 | |
switches | 0:0e018d759a2a | 383 | // dispatch events |
switches | 0:0e018d759a2a | 384 | while (es) { |
switches | 0:0e018d759a2a | 385 | struct equeue_event *e = es; |
switches | 0:0e018d759a2a | 386 | es = e->next; |
switches | 0:0e018d759a2a | 387 | |
switches | 0:0e018d759a2a | 388 | // actually dispatch the callbacks |
switches | 0:0e018d759a2a | 389 | void (*cb)(void *) = e->cb; |
switches | 0:0e018d759a2a | 390 | if (cb) { |
switches | 0:0e018d759a2a | 391 | cb(e + 1); |
switches | 0:0e018d759a2a | 392 | } |
switches | 0:0e018d759a2a | 393 | |
switches | 0:0e018d759a2a | 394 | // reenqueue periodic events or deallocate |
switches | 0:0e018d759a2a | 395 | if (e->period >= 0) { |
switches | 0:0e018d759a2a | 396 | e->target += e->period; |
switches | 0:0e018d759a2a | 397 | equeue_enqueue(q, e, equeue_tick()); |
switches | 0:0e018d759a2a | 398 | } else { |
switches | 0:0e018d759a2a | 399 | equeue_incid(q, e); |
switches | 0:0e018d759a2a | 400 | equeue_dealloc(q, e+1); |
switches | 0:0e018d759a2a | 401 | } |
switches | 0:0e018d759a2a | 402 | } |
switches | 0:0e018d759a2a | 403 | |
switches | 0:0e018d759a2a | 404 | int deadline = -1; |
switches | 0:0e018d759a2a | 405 | tick = equeue_tick(); |
switches | 0:0e018d759a2a | 406 | |
switches | 0:0e018d759a2a | 407 | // check if we should stop dispatching soon |
switches | 0:0e018d759a2a | 408 | if (ms >= 0) { |
switches | 0:0e018d759a2a | 409 | deadline = equeue_tickdiff(timeout, tick); |
switches | 0:0e018d759a2a | 410 | if (deadline <= 0) { |
switches | 0:0e018d759a2a | 411 | // update background timer if necessary |
switches | 0:0e018d759a2a | 412 | if (q->background.update) { |
switches | 0:0e018d759a2a | 413 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 414 | if (q->background.update && q->queue) { |
switches | 0:0e018d759a2a | 415 | q->background.update(q->background.timer, |
switches | 0:0e018d759a2a | 416 | equeue_clampdiff(q->queue->target, tick)); |
switches | 0:0e018d759a2a | 417 | } |
switches | 0:0e018d759a2a | 418 | q->background.active = true; |
switches | 0:0e018d759a2a | 419 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 420 | } |
switches | 0:0e018d759a2a | 421 | return; |
switches | 0:0e018d759a2a | 422 | } |
switches | 0:0e018d759a2a | 423 | } |
switches | 0:0e018d759a2a | 424 | |
switches | 0:0e018d759a2a | 425 | // find closest deadline |
switches | 0:0e018d759a2a | 426 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 427 | if (q->queue) { |
switches | 0:0e018d759a2a | 428 | int diff = equeue_clampdiff(q->queue->target, tick); |
switches | 0:0e018d759a2a | 429 | if ((unsigned)diff < (unsigned)deadline) { |
switches | 0:0e018d759a2a | 430 | deadline = diff; |
switches | 0:0e018d759a2a | 431 | } |
switches | 0:0e018d759a2a | 432 | } |
switches | 0:0e018d759a2a | 433 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 434 | |
switches | 0:0e018d759a2a | 435 | // wait for events |
switches | 0:0e018d759a2a | 436 | equeue_sema_wait(&q->eventsema, deadline); |
switches | 0:0e018d759a2a | 437 | |
switches | 0:0e018d759a2a | 438 | // check if we were notified to break out of dispatch |
switches | 0:0e018d759a2a | 439 | if (q->breaks) { |
switches | 0:0e018d759a2a | 440 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 441 | if (q->breaks > 0) { |
switches | 0:0e018d759a2a | 442 | q->breaks--; |
switches | 0:0e018d759a2a | 443 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 444 | return; |
switches | 0:0e018d759a2a | 445 | } |
switches | 0:0e018d759a2a | 446 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 447 | } |
switches | 0:0e018d759a2a | 448 | |
switches | 0:0e018d759a2a | 449 | // update tick for next iteration |
switches | 0:0e018d759a2a | 450 | tick = equeue_tick(); |
switches | 0:0e018d759a2a | 451 | } |
switches | 0:0e018d759a2a | 452 | } |
switches | 0:0e018d759a2a | 453 | |
switches | 0:0e018d759a2a | 454 | |
switches | 0:0e018d759a2a | 455 | // event functions |
switches | 0:0e018d759a2a | 456 | void equeue_event_delay(void *p, int ms) { |
switches | 0:0e018d759a2a | 457 | struct equeue_event *e = (struct equeue_event*)p - 1; |
switches | 0:0e018d759a2a | 458 | e->target = ms; |
switches | 0:0e018d759a2a | 459 | } |
switches | 0:0e018d759a2a | 460 | |
switches | 0:0e018d759a2a | 461 | void equeue_event_period(void *p, int ms) { |
switches | 0:0e018d759a2a | 462 | struct equeue_event *e = (struct equeue_event*)p - 1; |
switches | 0:0e018d759a2a | 463 | e->period = ms; |
switches | 0:0e018d759a2a | 464 | } |
switches | 0:0e018d759a2a | 465 | |
switches | 0:0e018d759a2a | 466 | void equeue_event_dtor(void *p, void (*dtor)(void *)) { |
switches | 0:0e018d759a2a | 467 | struct equeue_event *e = (struct equeue_event*)p - 1; |
switches | 0:0e018d759a2a | 468 | e->dtor = dtor; |
switches | 0:0e018d759a2a | 469 | } |
switches | 0:0e018d759a2a | 470 | |
switches | 0:0e018d759a2a | 471 | |
switches | 0:0e018d759a2a | 472 | // simple callbacks |
switches | 0:0e018d759a2a | 473 | struct ecallback { |
switches | 0:0e018d759a2a | 474 | void (*cb)(void*); |
switches | 0:0e018d759a2a | 475 | void *data; |
switches | 0:0e018d759a2a | 476 | }; |
switches | 0:0e018d759a2a | 477 | |
switches | 0:0e018d759a2a | 478 | static void ecallback_dispatch(void *p) { |
switches | 0:0e018d759a2a | 479 | struct ecallback *e = (struct ecallback*)p; |
switches | 0:0e018d759a2a | 480 | e->cb(e->data); |
switches | 0:0e018d759a2a | 481 | } |
switches | 0:0e018d759a2a | 482 | |
switches | 0:0e018d759a2a | 483 | int equeue_call(equeue_t *q, void (*cb)(void*), void *data) { |
switches | 0:0e018d759a2a | 484 | struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback)); |
switches | 0:0e018d759a2a | 485 | if (!e) { |
switches | 0:0e018d759a2a | 486 | return 0; |
switches | 0:0e018d759a2a | 487 | } |
switches | 0:0e018d759a2a | 488 | |
switches | 0:0e018d759a2a | 489 | e->cb = cb; |
switches | 0:0e018d759a2a | 490 | e->data = data; |
switches | 0:0e018d759a2a | 491 | return equeue_post(q, ecallback_dispatch, e); |
switches | 0:0e018d759a2a | 492 | } |
switches | 0:0e018d759a2a | 493 | |
switches | 0:0e018d759a2a | 494 | int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) { |
switches | 0:0e018d759a2a | 495 | struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback)); |
switches | 0:0e018d759a2a | 496 | if (!e) { |
switches | 0:0e018d759a2a | 497 | return 0; |
switches | 0:0e018d759a2a | 498 | } |
switches | 0:0e018d759a2a | 499 | |
switches | 0:0e018d759a2a | 500 | equeue_event_delay(e, ms); |
switches | 0:0e018d759a2a | 501 | e->cb = cb; |
switches | 0:0e018d759a2a | 502 | e->data = data; |
switches | 0:0e018d759a2a | 503 | return equeue_post(q, ecallback_dispatch, e); |
switches | 0:0e018d759a2a | 504 | } |
switches | 0:0e018d759a2a | 505 | |
switches | 0:0e018d759a2a | 506 | int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) { |
switches | 0:0e018d759a2a | 507 | struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback)); |
switches | 0:0e018d759a2a | 508 | if (!e) { |
switches | 0:0e018d759a2a | 509 | return 0; |
switches | 0:0e018d759a2a | 510 | } |
switches | 0:0e018d759a2a | 511 | |
switches | 0:0e018d759a2a | 512 | equeue_event_delay(e, ms); |
switches | 0:0e018d759a2a | 513 | equeue_event_period(e, ms); |
switches | 0:0e018d759a2a | 514 | e->cb = cb; |
switches | 0:0e018d759a2a | 515 | e->data = data; |
switches | 0:0e018d759a2a | 516 | return equeue_post(q, ecallback_dispatch, e); |
switches | 0:0e018d759a2a | 517 | } |
switches | 0:0e018d759a2a | 518 | |
switches | 0:0e018d759a2a | 519 | |
switches | 0:0e018d759a2a | 520 | // backgrounding |
switches | 0:0e018d759a2a | 521 | void equeue_background(equeue_t *q, |
switches | 0:0e018d759a2a | 522 | void (*update)(void *timer, int ms), void *timer) { |
switches | 0:0e018d759a2a | 523 | equeue_mutex_lock(&q->queuelock); |
switches | 0:0e018d759a2a | 524 | if (q->background.update) { |
switches | 0:0e018d759a2a | 525 | q->background.update(q->background.timer, -1); |
switches | 0:0e018d759a2a | 526 | } |
switches | 0:0e018d759a2a | 527 | |
switches | 0:0e018d759a2a | 528 | q->background.update = update; |
switches | 0:0e018d759a2a | 529 | q->background.timer = timer; |
switches | 0:0e018d759a2a | 530 | |
switches | 0:0e018d759a2a | 531 | if (q->background.update && q->queue) { |
switches | 0:0e018d759a2a | 532 | q->background.update(q->background.timer, |
switches | 0:0e018d759a2a | 533 | equeue_clampdiff(q->queue->target, equeue_tick())); |
switches | 0:0e018d759a2a | 534 | } |
switches | 0:0e018d759a2a | 535 | q->background.active = true; |
switches | 0:0e018d759a2a | 536 | equeue_mutex_unlock(&q->queuelock); |
switches | 0:0e018d759a2a | 537 | } |
switches | 0:0e018d759a2a | 538 | |
switches | 0:0e018d759a2a | 539 | struct equeue_chain_context { |
switches | 0:0e018d759a2a | 540 | equeue_t *q; |
switches | 0:0e018d759a2a | 541 | equeue_t *target; |
switches | 0:0e018d759a2a | 542 | int id; |
switches | 0:0e018d759a2a | 543 | }; |
switches | 0:0e018d759a2a | 544 | |
switches | 0:0e018d759a2a | 545 | static void equeue_chain_dispatch(void *p) { |
switches | 0:0e018d759a2a | 546 | equeue_dispatch((equeue_t *)p, 0); |
switches | 0:0e018d759a2a | 547 | } |
switches | 0:0e018d759a2a | 548 | |
switches | 0:0e018d759a2a | 549 | static void equeue_chain_update(void *p, int ms) { |
switches | 0:0e018d759a2a | 550 | struct equeue_chain_context *c = (struct equeue_chain_context *)p; |
switches | 0:0e018d759a2a | 551 | equeue_cancel(c->target, c->id); |
switches | 0:0e018d759a2a | 552 | |
switches | 0:0e018d759a2a | 553 | if (ms >= 0) { |
switches | 0:0e018d759a2a | 554 | c->id = equeue_call_in(c->target, ms, equeue_chain_dispatch, c->q); |
switches | 0:0e018d759a2a | 555 | } else { |
switches | 0:0e018d759a2a | 556 | equeue_dealloc(c->target, c); |
switches | 0:0e018d759a2a | 557 | } |
switches | 0:0e018d759a2a | 558 | } |
switches | 0:0e018d759a2a | 559 | |
switches | 0:0e018d759a2a | 560 | void equeue_chain(equeue_t *q, equeue_t *target) { |
switches | 0:0e018d759a2a | 561 | struct equeue_chain_context *c = equeue_alloc(q, |
switches | 0:0e018d759a2a | 562 | sizeof(struct equeue_chain_context)); |
switches | 0:0e018d759a2a | 563 | |
switches | 0:0e018d759a2a | 564 | c->q = q; |
switches | 0:0e018d759a2a | 565 | c->target = target; |
switches | 0:0e018d759a2a | 566 | c->id = 0; |
switches | 0:0e018d759a2a | 567 | |
switches | 0:0e018d759a2a | 568 | equeue_background(q, equeue_chain_update, c); |
switches | 0:0e018d759a2a | 569 | } |
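
For orientation, here is a minimal usage sketch of the public API annotated above. It is not part of the revision; it only strings together functions that appear in the listing (equeue_create, equeue_call_in, equeue_call_every, equeue_dispatch, equeue_cancel, equeue_destroy) and assumes the platform hooks this file depends on (equeue_tick, equeue_sema_*, equeue_mutex_*) are supplied by a platform port linked alongside equeue.c.

```c
#include <stdio.h>
#include "equeue/equeue.h"

static void hello(void *ctx) {
    printf("hello from %s\n", (const char *)ctx);
}

int main(void) {
    equeue_t queue;

    // back the queue with a dynamically allocated 2048-byte buffer
    if (equeue_create(&queue, 2048) < 0) {
        return 1;
    }

    // one-shot callback after 100 ms, periodic callback every 500 ms
    equeue_call_in(&queue, 100, hello, "one-shot");
    int periodic = equeue_call_every(&queue, 500, hello, "periodic");

    // dispatch events for roughly two seconds, then stop the periodic event
    equeue_dispatch(&queue, 2000);
    equeue_cancel(&queue, periodic);

    equeue_destroy(&queue);
    return 0;
}
```

Passing a negative timeout to equeue_dispatch would instead dispatch indefinitely until another context calls equeue_break, as the `ms >= 0` check in the dispatch loop above shows.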
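
The equeue_tickdiff and equeue_clampdiff helpers near the top of the file compare absolute tick counts using unsigned wraparound. A small stand-alone illustration with hypothetical values, assuming 32-bit int and unsigned and the arithmetic right shift the original helper also relies on:

```c
#include <assert.h>

int main(void) {
    // a tick counter sampled just before and just after 32-bit wraparound
    unsigned before = 0xfffffff0u;
    unsigned after  = 0x00000010u;   // 0x20 ticks later, the counter has wrapped

    // equeue_tickdiff: unsigned subtraction wraps modulo 2^32, and the cast to
    // int reinterprets the wrapped result as a signed distance, so the answer
    // stays correct while the two times are less than 2^31 ticks apart
    int diff = (int)(after - before);
    assert(diff == 0x20);

    // equeue_clampdiff additionally masks negative differences to zero using
    // the sign bit (this relies on arithmetic right shift of negative ints,
    // as the original helper does)
    int neg = -5;
    int clamped = ~(neg >> (8 * sizeof(int) - 1)) & neg;
    assert(clamped == 0);

    return 0;
}
```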
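
equeue_enqueue packs an event's rolling local id together with its byte offset in the queue buffer into the single int handle returned by equeue_post; equeue_unqueue splits the handle back apart and rejects it if the local id no longer matches (equeue_incid bumps the local id whenever a slot is reused, which is what "hides" stale handles from cancel). A worked example with hypothetical numbers: a 2048-byte buffer gives npw2 == 12 via the bit-counting loop in equeue_create_inplace.

```c
#include <assert.h>

int main(void) {
    // hypothetical numbers: a 2048-byte buffer gives npw2 == 12, and suppose
    // an event chunk starts at byte offset 64 with local id 3
    int npw2 = 12;
    int offset = 64;
    int local_id = 3;

    // equeue_enqueue: hash the local id with the buffer offset
    int id = (local_id << npw2) | offset;
    assert(id == 12352);

    // equeue_unqueue: recover the buffer offset from the low bits and check
    // that the local id in the high bits still matches the event's id
    assert((id & ((1 << npw2) - 1)) == offset);
    assert((id >> npw2) == local_id);

    return 0;
}
```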