Kev Mann / mbed-dev-OS5_10_4
Committer: kevman
Date:      Wed Mar 13 11:03:24 2019 +0000
Revision:  2:7aab896b1a3b
Parent:    0:38ceb79fef03
Message:   2019-03-13

/*
 * Flexible event queue for dispatching events
 *
 * Copyright (c) 2016 Christopher Haster
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "equeue/equeue.h"

#include <stdlib.h>
#include <string.h>


// calculate the relative difference between absolute times while
// correctly handling overflow conditions
static inline int equeue_tickdiff(unsigned a, unsigned b) {
    return (int)(unsigned)(a - b);
}
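// (the subtraction is done in unsigned arithmetic, so on two's-complement
// targets the difference stays correct across tick-counter wraparound:
// e.g. equeue_tickdiff(2, 0xfffffffe) is 4, not a huge negative number)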

// calculate the relative difference between absolute times, but
// also clamp to zero, resulting in only non-negative values
static inline int equeue_clampdiff(unsigned a, unsigned b) {
    int diff = equeue_tickdiff(a, b);
    return ~(diff >> (8*sizeof(int)-1)) & diff;
}
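// (branchless clamp: the shift smears diff's sign bit across the word, so
// the mask ~(diff >> (8*sizeof(int)-1)) is all ones for diff >= 0 and all
// zeros for diff < 0; this assumes signed right shift sign-extends, as it
// does on mbed targets)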

// Increment the unique id in an event, hiding the event from cancel
static inline void equeue_incid(equeue_t *q, struct equeue_event *e) {
    e->id += 1;
    if ((e->id << q->npw2) == 0) {
        e->id = 1;
    }
}
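// (the id is reset to 1 whenever incrementing it would make the packed id
// (e->id << npw2) read as 0, so a valid event id is never zero and a stale
// id is unlikely to collide with a recycled event)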


// equeue lifetime management
int equeue_create(equeue_t *q, size_t size) {
    // dynamically allocate the specified buffer
    void *buffer = malloc(size);
    if (!buffer) {
        return -1;
    }

    int err = equeue_create_inplace(q, size, buffer);
    q->allocated = buffer;
    return err;
}

int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) {
    // set up the queue around the provided buffer
    q->buffer = buffer;
    q->allocated = 0;

    q->npw2 = 0;
    for (unsigned s = size; s; s >>= 1) {
        q->npw2++;
    }
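    // (npw2 is now the number of significant bits in size, e.g. size = 1024
    // gives npw2 = 11; any event's byte offset into the buffer fits in npw2
    // bits, which is what makes the id packing in equeue_enqueue work)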

    q->chunks = 0;
    q->slab.size = size;
    q->slab.data = buffer;

    q->queue = 0;
    q->tick = equeue_tick();
    q->generation = 0;
    q->break_requested = false;

    q->background.active = false;
    q->background.update = 0;
    q->background.timer = 0;

    // initialize platform resources
    int err;
    err = equeue_sema_create(&q->eventsema);
    if (err < 0) {
        return err;
    }

    err = equeue_mutex_create(&q->queuelock);
    if (err < 0) {
        return err;
    }

    err = equeue_mutex_create(&q->memlock);
    if (err < 0) {
        return err;
    }

    return 0;
}

void equeue_destroy(equeue_t *q) {
    // call destructors on pending events
    for (struct equeue_event *es = q->queue; es; es = es->next) {
        for (struct equeue_event *e = es; e; e = e->sibling) {
            if (e->dtor) {
                e->dtor(e + 1);
            }
        }
    }

    // notify background timer
    if (q->background.update) {
        q->background.update(q->background.timer, -1);
    }

    // clean up platform resources + memory
    equeue_mutex_destroy(&q->memlock);
    equeue_mutex_destroy(&q->queuelock);
    equeue_sema_destroy(&q->eventsema);
    free(q->allocated);
}


// equeue chunk allocation functions
static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) {
    // add event overhead
    size += sizeof(struct equeue_event);
    size = (size + sizeof(void*)-1) & ~(sizeof(void*)-1);
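    // (the (x + a-1) & ~(a-1) idiom rounds size up to the next multiple of
    // sizeof(void*), a power of two, keeping chunks pointer-aligned)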

    equeue_mutex_lock(&q->memlock);

    // check if a good chunk is available
    for (struct equeue_event **p = &q->chunks; *p; p = &(*p)->next) {
        if ((*p)->size >= size) {
            struct equeue_event *e = *p;
            if (e->sibling) {
                *p = e->sibling;
                (*p)->next = e->next;
            } else {
                *p = e->next;
            }

            equeue_mutex_unlock(&q->memlock);
            return e;
        }
    }

    // otherwise allocate a new chunk out of the slab
    if (q->slab.size >= size) {
        struct equeue_event *e = (struct equeue_event *)q->slab.data;
        q->slab.data += size;
        q->slab.size -= size;
        e->size = size;
        e->id = 1;

        equeue_mutex_unlock(&q->memlock);
        return e;
    }

    equeue_mutex_unlock(&q->memlock);
    return 0;
}

static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) {
    equeue_mutex_lock(&q->memlock);

    // stick chunk into list of chunks
    struct equeue_event **p = &q->chunks;
    while (*p && (*p)->size < e->size) {
        p = &(*p)->next;
    }

    if (*p && (*p)->size == e->size) {
        e->sibling = *p;
        e->next = (*p)->next;
    } else {
        e->sibling = 0;
        e->next = *p;
    }
    *p = e;

    equeue_mutex_unlock(&q->memlock);
}
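// (freed chunks form a list sorted by ascending size; chunks of equal size
// hang off the first one's ->sibling chain, so the allocator above scans at
// most one node per distinct size)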

void *equeue_alloc(equeue_t *q, size_t size) {
    struct equeue_event *e = equeue_mem_alloc(q, size);
    if (!e) {
        return 0;
    }

    e->target = 0;
    e->period = -1;
    e->dtor = 0;

    return e + 1;
}
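// (equeue_alloc hands the caller the memory just past the event header;
// every public function recovers the header with (struct equeue_event*)p - 1,
// as equeue_dealloc does below)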

void equeue_dealloc(equeue_t *q, void *p) {
    struct equeue_event *e = (struct equeue_event*)p - 1;

    if (e->dtor) {
        e->dtor(e+1);
    }

    equeue_mem_dealloc(q, e);
}


// equeue scheduling functions
static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
    // set up event and hash the local id with the buffer offset for a unique id
    int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
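    // (the low npw2 bits hold the event's byte offset in the buffer and the
    // bits above hold its sequence counter; equeue_unqueue reverses this
    // split to reject stale ids in O(1))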
    e->target = tick + equeue_clampdiff(e->target, tick);
    e->generation = q->generation;

    equeue_mutex_lock(&q->queuelock);

    // find the event slot
    struct equeue_event **p = &q->queue;
    while (*p && equeue_tickdiff((*p)->target, e->target) < 0) {
        p = &(*p)->next;
    }

    // insert at head in slot
    if (*p && (*p)->target == e->target) {
        e->next = (*p)->next;
        if (e->next) {
            e->next->ref = &e->next;
        }

        e->sibling = *p;
        e->sibling->ref = &e->sibling;
    } else {
        e->next = *p;
        if (e->next) {
            e->next->ref = &e->next;
        }

        e->sibling = 0;
    }

    *p = e;
    e->ref = p;
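    // (->ref always points back at whichever pointer links this event in,
    // letting equeue_unqueue unlink it in O(1) without walking the queue)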

    // notify background timer
    if ((q->background.update && q->background.active) &&
        (q->queue == e && !e->sibling)) {
        q->background.update(q->background.timer,
                equeue_clampdiff(e->target, tick));
    }

    equeue_mutex_unlock(&q->queuelock);

    return id;
}

static struct equeue_event *equeue_unqueue(equeue_t *q, int id) {
    // decode event from unique id and check that the local id matches
    struct equeue_event *e = (struct equeue_event *)
            &q->buffer[id & ((1 << q->npw2)-1)];

    equeue_mutex_lock(&q->queuelock);
    if (e->id != id >> q->npw2) {
        equeue_mutex_unlock(&q->queuelock);
        return 0;
    }

    // clear the event and check if already in-flight
    e->cb = 0;
    e->period = -1;

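    // (an event is in-flight once equeue_dequeue has claimed it: its target
    // is behind q->tick, or sits exactly on q->tick but was enqueued in an
    // earlier generation; such events must be left to the dispatch loop)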
    int diff = equeue_tickdiff(e->target, q->tick);
    if (diff < 0 || (diff == 0 && e->generation != q->generation)) {
        equeue_mutex_unlock(&q->queuelock);
        return 0;
    }

    // disentangle from queue
    if (e->sibling) {
        e->sibling->next = e->next;
        if (e->sibling->next) {
            e->sibling->next->ref = &e->sibling->next;
        }

        *e->ref = e->sibling;
        e->sibling->ref = e->ref;
    } else {
        *e->ref = e->next;
        if (e->next) {
            e->next->ref = e->ref;
        }
    }

    equeue_incid(q, e);
    equeue_mutex_unlock(&q->queuelock);

    return e;
}

static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
    equeue_mutex_lock(&q->queuelock);

    // find all expired events and mark a new generation
    q->generation += 1;
    if (equeue_tickdiff(q->tick, target) <= 0) {
        q->tick = target;
    }

    struct equeue_event *head = q->queue;
    struct equeue_event **p = &head;
    while (*p && equeue_tickdiff((*p)->target, target) <= 0) {
        p = &(*p)->next;
    }

    q->queue = *p;
    if (q->queue) {
        q->queue->ref = &q->queue;
    }

    *p = 0;

    equeue_mutex_unlock(&q->queuelock);

    // reverse and flatten each slot to match insertion order
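    // (equeue_enqueue pushes same-target events onto the head of their slot,
    // so each sibling chain is in LIFO order; reversing it here restores the
    // original posting order for dispatch)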
    struct equeue_event **tail = &head;
    struct equeue_event *ess = head;
    while (ess) {
        struct equeue_event *es = ess;
        ess = es->next;

        struct equeue_event *prev = 0;
        for (struct equeue_event *e = es; e; e = e->sibling) {
            e->next = prev;
            prev = e;
        }

        *tail = prev;
        tail = &es->next;
    }

    return head;
}

int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
    struct equeue_event *e = (struct equeue_event*)p - 1;
    unsigned tick = equeue_tick();
    e->cb = cb;
    e->target = tick + e->target;

    int id = equeue_enqueue(q, e, tick);
    equeue_sema_signal(&q->eventsema);
    return id;
}

void equeue_cancel(equeue_t *q, int id) {
    if (!id) {
        return;
    }

    struct equeue_event *e = equeue_unqueue(q, id);
    if (e) {
        equeue_dealloc(q, e + 1);
    }
}

int equeue_timeleft(equeue_t *q, int id) {
    int ret = -1;

    if (!id) {
        return -1;
    }

    // decode event from unique id and check that the local id matches
    struct equeue_event *e = (struct equeue_event *)
            &q->buffer[id & ((1 << q->npw2)-1)];

    equeue_mutex_lock(&q->queuelock);
    if (e->id == id >> q->npw2) {
        ret = equeue_clampdiff(e->target, equeue_tick());
    }
    equeue_mutex_unlock(&q->queuelock);
    return ret;
}

void equeue_break(equeue_t *q) {
    equeue_mutex_lock(&q->queuelock);
    q->break_requested = true;
    equeue_mutex_unlock(&q->queuelock);
    equeue_sema_signal(&q->eventsema);
}

void equeue_dispatch(equeue_t *q, int ms) {
    unsigned tick = equeue_tick();
    unsigned timeout = tick + ms;
    q->background.active = false;

    while (1) {
        // collect all the available events and next deadline
        struct equeue_event *es = equeue_dequeue(q, tick);

        // dispatch events
        while (es) {
            struct equeue_event *e = es;
            es = e->next;

            // actually dispatch the callbacks
            void (*cb)(void *) = e->cb;
            if (cb) {
                cb(e + 1);
            }

            // reenqueue periodic events or deallocate
            if (e->period >= 0) {
                e->target += e->period;
                equeue_enqueue(q, e, equeue_tick());
            } else {
                equeue_incid(q, e);
                equeue_dealloc(q, e+1);
            }
        }

        int deadline = -1;
        tick = equeue_tick();

        // check if we should stop dispatching soon
        if (ms >= 0) {
            deadline = equeue_tickdiff(timeout, tick);
            if (deadline <= 0) {
                // update background timer if necessary
                if (q->background.update) {
                    equeue_mutex_lock(&q->queuelock);
                    if (q->background.update && q->queue) {
                        q->background.update(q->background.timer,
                                equeue_clampdiff(q->queue->target, tick));
                    }
                    q->background.active = true;
                    equeue_mutex_unlock(&q->queuelock);
                }
                q->break_requested = false;
                return;
            }
        }

        // find closest deadline
        equeue_mutex_lock(&q->queuelock);
        if (q->queue) {
            int diff = equeue_clampdiff(q->queue->target, tick);
            if ((unsigned)diff < (unsigned)deadline) {
                deadline = diff;
            }
        }
        equeue_mutex_unlock(&q->queuelock);
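        // (the unsigned comparison above makes a deadline of -1 behave as
        // UINT_MAX, so any real timeout beats "wait forever")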

        // wait for events
        equeue_sema_wait(&q->eventsema, deadline);

        // check if we were notified to break out of dispatch
        if (q->break_requested) {
            equeue_mutex_lock(&q->queuelock);
            if (q->break_requested) {
                q->break_requested = false;
                equeue_mutex_unlock(&q->queuelock);
                return;
            }
            equeue_mutex_unlock(&q->queuelock);
        }

        // update tick for next iteration
        tick = equeue_tick();
    }
}


// event functions
void equeue_event_delay(void *p, int ms) {
    struct equeue_event *e = (struct equeue_event*)p - 1;
    e->target = ms;
}

void equeue_event_period(void *p, int ms) {
    struct equeue_event *e = (struct equeue_event*)p - 1;
    e->period = ms;
}

void equeue_event_dtor(void *p, void (*dtor)(void *)) {
    struct equeue_event *e = (struct equeue_event*)p - 1;
    e->dtor = dtor;
}


// simple callbacks
struct ecallback {
    void (*cb)(void*);
    void *data;
};

static void ecallback_dispatch(void *p) {
    struct ecallback *e = (struct ecallback*)p;
    e->cb(e->data);
}

int equeue_call(equeue_t *q, void (*cb)(void*), void *data) {
    struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
    if (!e) {
        return 0;
    }

    e->cb = cb;
    e->data = data;
    return equeue_post(q, ecallback_dispatch, e);
}

int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) {
    struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
    if (!e) {
        return 0;
    }

    equeue_event_delay(e, ms);
    e->cb = cb;
    e->data = data;
    return equeue_post(q, ecallback_dispatch, e);
}

int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) {
    struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
    if (!e) {
        return 0;
    }

    equeue_event_delay(e, ms);
    equeue_event_period(e, ms);
    e->cb = cb;
    e->data = data;
    return equeue_post(q, ecallback_dispatch, e);
}


// backgrounding
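// (the update callback receives the delay in ms until the next event, or -1
// to mean "disable the timer"; passing update = 0 detaches backgrounding)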
void equeue_background(equeue_t *q,
        void (*update)(void *timer, int ms), void *timer) {
    equeue_mutex_lock(&q->queuelock);
    if (q->background.update) {
        q->background.update(q->background.timer, -1);
    }

    q->background.update = update;
    q->background.timer = timer;

    if (q->background.update && q->queue) {
        q->background.update(q->background.timer,
                equeue_clampdiff(q->queue->target, equeue_tick()));
    }
    q->background.active = true;
    equeue_mutex_unlock(&q->queuelock);
}

struct equeue_chain_context {
    equeue_t *q;
    equeue_t *target;
    int id;
};

static void equeue_chain_dispatch(void *p) {
    equeue_dispatch((equeue_t *)p, 0);
}

static void equeue_chain_update(void *p, int ms) {
    struct equeue_chain_context *c = (struct equeue_chain_context *)p;
    equeue_cancel(c->target, c->id);

    if (ms >= 0) {
        c->id = equeue_call_in(c->target, ms, equeue_chain_dispatch, c->q);
    } else {
        equeue_dealloc(c->target, c);
    }
}

void equeue_chain(equeue_t *q, equeue_t *target) {
    if (!target) {
        equeue_background(q, 0, 0);
        return;
    }

    struct equeue_chain_context *c = equeue_alloc(q,
            sizeof(struct equeue_chain_context));

    c->q = q;
    c->target = target;
    c->id = 0;

    equeue_background(q, equeue_chain_update, c);
}
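

For reference, a minimal usage sketch of the API above (not part of the
original file; it assumes the platform hooks declared in equeue_platform.h,
namely equeue_tick, equeue_sema_* and equeue_mutex_*, are implemented for
the target, as they are in this library's mbed port, and that printf is
available):

#include "equeue/equeue.h"
#include <stdio.h>

static void hello(void *p) {
    printf("hello, %s\n", (const char *)p);
}

int main(void) {
    equeue_t q;
    if (equeue_create(&q, 2048) < 0) {
        return 1;
    }

    // run once after 10 ms, and repeatedly every second
    equeue_call_in(&q, 10, hello, "once");
    int id = equeue_call_every(&q, 1000, hello, "every second");

    // dispatch events for 5 seconds, then stop the periodic event
    equeue_dispatch(&q, 5000);
    equeue_cancel(&q, id);

    equeue_destroy(&q);
    return 0;
}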