mbed-os

Dependents:   cobaLCDJoyMotor_Thread odometry_omni_3roda_v3 odometry_omni_3roda_v1 odometry_omni_3roda_v2 ... more

Committer:
be_bryan
Date:
Mon Dec 11 17:54:04 2017 +0000
Revision:
0:b74591d5ab33
motor ++

Who changed what in which revision?

User | Revision | Line number | New contents of line
be_bryan 0:b74591d5ab33 1 /*
be_bryan 0:b74591d5ab33 2 * Flexible event queue for dispatching events
be_bryan 0:b74591d5ab33 3 *
be_bryan 0:b74591d5ab33 4 * Copyright (c) 2016 Christopher Haster
be_bryan 0:b74591d5ab33 5 *
be_bryan 0:b74591d5ab33 6 * Licensed under the Apache License, Version 2.0 (the "License");
be_bryan 0:b74591d5ab33 7 * you may not use this file except in compliance with the License.
be_bryan 0:b74591d5ab33 8 * You may obtain a copy of the License at
be_bryan 0:b74591d5ab33 9 *
be_bryan 0:b74591d5ab33 10 * http://www.apache.org/licenses/LICENSE-2.0
be_bryan 0:b74591d5ab33 11 *
be_bryan 0:b74591d5ab33 12 * Unless required by applicable law or agreed to in writing, software
be_bryan 0:b74591d5ab33 13 * distributed under the License is distributed on an "AS IS" BASIS,
be_bryan 0:b74591d5ab33 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
be_bryan 0:b74591d5ab33 15 * See the License for the specific language governing permissions and
be_bryan 0:b74591d5ab33 16 * limitations under the License.
be_bryan 0:b74591d5ab33 17 */
be_bryan 0:b74591d5ab33 18 #include "equeue/equeue.h"
be_bryan 0:b74591d5ab33 19
be_bryan 0:b74591d5ab33 20 #include <stdlib.h>
be_bryan 0:b74591d5ab33 21 #include <string.h>
be_bryan 0:b74591d5ab33 22
be_bryan 0:b74591d5ab33 23
// Relative difference between two absolute tick counts.
// Unsigned subtraction wraps, so the result is correct even when the
// tick counter has overflowed between the two samples.
static inline int equeue_tickdiff(unsigned a, unsigned b) {
    return (int)(a - b);
}
be_bryan 0:b74591d5ab33 29
// calculate the relative-difference between absolute times, but
// also clamp to zero, resulting in only non-negative values
static inline int equeue_clampdiff(unsigned a, unsigned b) {
    int diff = equeue_tickdiff(a, b);
    // explicit branch instead of the old `~(diff >> 31) & diff` trick:
    // right-shifting a negative signed value is implementation-defined
    // (C11 6.5.7), and the ternary is clearer for the same result
    return diff > 0 ? diff : 0;
}
be_bryan 0:b74591d5ab33 36
be_bryan 0:b74591d5ab33 37 // Increment the unique id in an event, hiding the event from cancel
be_bryan 0:b74591d5ab33 38 static inline void equeue_incid(equeue_t *q, struct equeue_event *e) {
be_bryan 0:b74591d5ab33 39 e->id += 1;
be_bryan 0:b74591d5ab33 40 if (!(e->id << q->npw2)) {
be_bryan 0:b74591d5ab33 41 e->id = 1;
be_bryan 0:b74591d5ab33 42 }
be_bryan 0:b74591d5ab33 43 }
be_bryan 0:b74591d5ab33 44
be_bryan 0:b74591d5ab33 45
be_bryan 0:b74591d5ab33 46 // equeue lifetime management
be_bryan 0:b74591d5ab33 47 int equeue_create(equeue_t *q, size_t size) {
be_bryan 0:b74591d5ab33 48 // dynamically allocate the specified buffer
be_bryan 0:b74591d5ab33 49 void *buffer = malloc(size);
be_bryan 0:b74591d5ab33 50 if (!buffer) {
be_bryan 0:b74591d5ab33 51 return -1;
be_bryan 0:b74591d5ab33 52 }
be_bryan 0:b74591d5ab33 53
be_bryan 0:b74591d5ab33 54 int err = equeue_create_inplace(q, size, buffer);
be_bryan 0:b74591d5ab33 55 q->allocated = buffer;
be_bryan 0:b74591d5ab33 56 return err;
be_bryan 0:b74591d5ab33 57 }
be_bryan 0:b74591d5ab33 58
be_bryan 0:b74591d5ab33 59 int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) {
be_bryan 0:b74591d5ab33 60 // setup queue around provided buffer
be_bryan 0:b74591d5ab33 61 q->buffer = buffer;
be_bryan 0:b74591d5ab33 62 q->allocated = 0;
be_bryan 0:b74591d5ab33 63
be_bryan 0:b74591d5ab33 64 q->npw2 = 0;
be_bryan 0:b74591d5ab33 65 for (unsigned s = size; s; s >>= 1) {
be_bryan 0:b74591d5ab33 66 q->npw2++;
be_bryan 0:b74591d5ab33 67 }
be_bryan 0:b74591d5ab33 68
be_bryan 0:b74591d5ab33 69 q->chunks = 0;
be_bryan 0:b74591d5ab33 70 q->slab.size = size;
be_bryan 0:b74591d5ab33 71 q->slab.data = buffer;
be_bryan 0:b74591d5ab33 72
be_bryan 0:b74591d5ab33 73 q->queue = 0;
be_bryan 0:b74591d5ab33 74 q->tick = equeue_tick();
be_bryan 0:b74591d5ab33 75 q->generation = 0;
be_bryan 0:b74591d5ab33 76 q->breaks = 0;
be_bryan 0:b74591d5ab33 77
be_bryan 0:b74591d5ab33 78 q->background.active = false;
be_bryan 0:b74591d5ab33 79 q->background.update = 0;
be_bryan 0:b74591d5ab33 80 q->background.timer = 0;
be_bryan 0:b74591d5ab33 81
be_bryan 0:b74591d5ab33 82 // initialize platform resources
be_bryan 0:b74591d5ab33 83 int err;
be_bryan 0:b74591d5ab33 84 err = equeue_sema_create(&q->eventsema);
be_bryan 0:b74591d5ab33 85 if (err < 0) {
be_bryan 0:b74591d5ab33 86 return err;
be_bryan 0:b74591d5ab33 87 }
be_bryan 0:b74591d5ab33 88
be_bryan 0:b74591d5ab33 89 err = equeue_mutex_create(&q->queuelock);
be_bryan 0:b74591d5ab33 90 if (err < 0) {
be_bryan 0:b74591d5ab33 91 return err;
be_bryan 0:b74591d5ab33 92 }
be_bryan 0:b74591d5ab33 93
be_bryan 0:b74591d5ab33 94 err = equeue_mutex_create(&q->memlock);
be_bryan 0:b74591d5ab33 95 if (err < 0) {
be_bryan 0:b74591d5ab33 96 return err;
be_bryan 0:b74591d5ab33 97 }
be_bryan 0:b74591d5ab33 98
be_bryan 0:b74591d5ab33 99 return 0;
be_bryan 0:b74591d5ab33 100 }
be_bryan 0:b74591d5ab33 101
be_bryan 0:b74591d5ab33 102 void equeue_destroy(equeue_t *q) {
be_bryan 0:b74591d5ab33 103 // call destructors on pending events
be_bryan 0:b74591d5ab33 104 for (struct equeue_event *es = q->queue; es; es = es->next) {
be_bryan 0:b74591d5ab33 105 for (struct equeue_event *e = q->queue; e; e = e->sibling) {
be_bryan 0:b74591d5ab33 106 if (e->dtor) {
be_bryan 0:b74591d5ab33 107 e->dtor(e + 1);
be_bryan 0:b74591d5ab33 108 }
be_bryan 0:b74591d5ab33 109 }
be_bryan 0:b74591d5ab33 110 }
be_bryan 0:b74591d5ab33 111
be_bryan 0:b74591d5ab33 112 // notify background timer
be_bryan 0:b74591d5ab33 113 if (q->background.update) {
be_bryan 0:b74591d5ab33 114 q->background.update(q->background.timer, -1);
be_bryan 0:b74591d5ab33 115 }
be_bryan 0:b74591d5ab33 116
be_bryan 0:b74591d5ab33 117 // clean up platform resources + memory
be_bryan 0:b74591d5ab33 118 equeue_mutex_destroy(&q->memlock);
be_bryan 0:b74591d5ab33 119 equeue_mutex_destroy(&q->queuelock);
be_bryan 0:b74591d5ab33 120 equeue_sema_destroy(&q->eventsema);
be_bryan 0:b74591d5ab33 121 free(q->allocated);
be_bryan 0:b74591d5ab33 122 }
be_bryan 0:b74591d5ab33 123
be_bryan 0:b74591d5ab33 124
// equeue chunk allocation functions
//
// Allocate a chunk big enough for an event header plus `size` bytes of
// user data. Recycled chunks are taken from a size-sorted free list
// first; otherwise new memory is carved off the slab. Returns 0 when
// no memory is available. Thread-safe via q->memlock.
static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) {
    // add event overhead
    size += sizeof(struct equeue_event);
    // round up to pointer alignment
    size = (size + sizeof(void*)-1) & ~(sizeof(void*)-1);

    equeue_mutex_lock(&q->memlock);

    // check if a good chunk is available
    // the free list is sorted by size; chunks of equal size hang off
    // the first one as a sibling list (see equeue_mem_dealloc)
    for (struct equeue_event **p = &q->chunks; *p; p = &(*p)->next) {
        if ((*p)->size >= size) {
            struct equeue_event *e = *p;
            if (e->sibling) {
                // promote an equal-sized sibling into this list slot
                *p = e->sibling;
                (*p)->next = e->next;
            } else {
                *p = e->next;
            }

            equeue_mutex_unlock(&q->memlock);
            return e;
        }
    }

    // otherwise allocate a new chunk out of the slab
    if (q->slab.size >= size) {
        struct equeue_event *e = (struct equeue_event *)q->slab.data;
        q->slab.data += size;
        q->slab.size -= size;
        e->size = size;
        e->id = 1;

        equeue_mutex_unlock(&q->memlock);
        return e;
    }

    equeue_mutex_unlock(&q->memlock);
    return 0;
}
be_bryan 0:b74591d5ab33 164
// Return a chunk to the size-sorted free list for reuse by
// equeue_mem_alloc. Chunks never go back to the slab; equal-sized
// chunks are grouped into a sibling list under one list node.
static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) {
    equeue_mutex_lock(&q->memlock);

    // stick chunk into list of chunks
    // find the insertion point that keeps the list sorted by size
    struct equeue_event **p = &q->chunks;
    while (*p && (*p)->size < e->size) {
        p = &(*p)->next;
    }

    if (*p && (*p)->size == e->size) {
        // a chunk of the same size exists: join its sibling group
        e->sibling = *p;
        e->next = (*p)->next;
    } else {
        // new size class: splice into the main next chain
        e->sibling = 0;
        e->next = *p;
    }
    *p = e;

    equeue_mutex_unlock(&q->memlock);
}
be_bryan 0:b74591d5ab33 185
be_bryan 0:b74591d5ab33 186 void *equeue_alloc(equeue_t *q, size_t size) {
be_bryan 0:b74591d5ab33 187 struct equeue_event *e = equeue_mem_alloc(q, size);
be_bryan 0:b74591d5ab33 188 if (!e) {
be_bryan 0:b74591d5ab33 189 return 0;
be_bryan 0:b74591d5ab33 190 }
be_bryan 0:b74591d5ab33 191
be_bryan 0:b74591d5ab33 192 e->target = 0;
be_bryan 0:b74591d5ab33 193 e->period = -1;
be_bryan 0:b74591d5ab33 194 e->dtor = 0;
be_bryan 0:b74591d5ab33 195
be_bryan 0:b74591d5ab33 196 return e + 1;
be_bryan 0:b74591d5ab33 197 }
be_bryan 0:b74591d5ab33 198
be_bryan 0:b74591d5ab33 199 void equeue_dealloc(equeue_t *q, void *p) {
be_bryan 0:b74591d5ab33 200 struct equeue_event *e = (struct equeue_event*)p - 1;
be_bryan 0:b74591d5ab33 201
be_bryan 0:b74591d5ab33 202 if (e->dtor) {
be_bryan 0:b74591d5ab33 203 e->dtor(e+1);
be_bryan 0:b74591d5ab33 204 }
be_bryan 0:b74591d5ab33 205
be_bryan 0:b74591d5ab33 206 equeue_mem_dealloc(q, e);
be_bryan 0:b74591d5ab33 207 }
be_bryan 0:b74591d5ab33 208
be_bryan 0:b74591d5ab33 209
// equeue scheduling functions
//
// Insert an event into the time-sorted queue and return its unique id.
// Events sharing a target tick are grouped into a sibling list so a
// whole time slot can be dispatched together. The id encodes the
// per-event counter in the high bits and the buffer offset in the low
// npw2 bits, so stale ids can be detected in equeue_unqueue.
static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
    // setup event and hash local id with buffer offset for unique id
    int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
    // clamp so an already-expired target fires at the current tick
    e->target = tick + equeue_clampdiff(e->target, tick);
    e->generation = q->generation;

    equeue_mutex_lock(&q->queuelock);

    // find the event slot (first slot not strictly earlier than target)
    struct equeue_event **p = &q->queue;
    while (*p && equeue_tickdiff((*p)->target, e->target) < 0) {
        p = &(*p)->next;
    }

    // insert at head in slot
    if (*p && (*p)->target == e->target) {
        // same target tick: push onto the slot's sibling list, taking
        // over the slot head's position in the next chain
        e->next = (*p)->next;
        if (e->next) {
            e->next->ref = &e->next;
        }

        e->sibling = *p;
        e->sibling->ref = &e->sibling;
    } else {
        // new time slot: link into the time-ordered next chain
        e->next = *p;
        if (e->next) {
            e->next->ref = &e->next;
        }

        e->sibling = 0;
    }

    // ref is a back-pointer to whichever pointer holds us, enabling
    // O(1) removal in equeue_unqueue
    *p = e;
    e->ref = p;

    // notify background timer
    // only needed when this event became the new earliest deadline
    if ((q->background.update && q->background.active) &&
        (q->queue == e && !e->sibling)) {
        q->background.update(q->background.timer,
                equeue_clampdiff(e->target, tick));
    }

    equeue_mutex_unlock(&q->queuelock);

    return id;
}
be_bryan 0:b74591d5ab33 257
// Remove the event identified by id from the queue. Returns the event,
// or 0 when the id is stale or the event is already in-flight (in
// which case the dispatcher owns it). On success the event's id is
// bumped so the old handle can never match again.
static struct equeue_event *equeue_unqueue(equeue_t *q, int id) {
    // decode event from unique id and check that the local id matches
    struct equeue_event *e = (struct equeue_event *)
            &q->buffer[id & ((1 << q->npw2)-1)];

    equeue_mutex_lock(&q->queuelock);
    if (e->id != id >> q->npw2) {
        // stale id: this chunk has been recycled since the id was issued
        equeue_mutex_unlock(&q->queuelock);
        return 0;
    }

    // clear the event and check if already in-flight
    // zeroing cb means the dispatcher will skip the callback even if
    // it has already dequeued this event
    e->cb = 0;
    e->period = -1;

    int diff = equeue_tickdiff(e->target, q->tick);
    if (diff < 0 || (diff == 0 && e->generation != q->generation)) {
        // already pulled out by equeue_dequeue; let dispatch finish it
        equeue_mutex_unlock(&q->queuelock);
        return 0;
    }

    // disentangle from queue
    if (e->sibling) {
        // promote a sibling to take this event's place in its slot
        e->sibling->next = e->next;
        if (e->sibling->next) {
            e->sibling->next->ref = &e->sibling->next;
        }

        *e->ref = e->sibling;
        e->sibling->ref = e->ref;
    } else {
        // sole event in its slot: just unlink from the next chain
        *e->ref = e->next;
        if (e->next) {
            e->next->ref = e->ref;
        }
    }

    equeue_incid(q, e);
    equeue_mutex_unlock(&q->queuelock);

    return e;
}
be_bryan 0:b74591d5ab33 300
// Detach every event whose target is at or before `target` and return
// them as a flat list (linked by next) in insertion order, ready to be
// dispatched without holding the queue lock.
static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
    equeue_mutex_lock(&q->queuelock);

    // find all expired events and mark a new generation
    // (generation lets equeue_unqueue detect in-flight events)
    q->generation += 1;
    if (equeue_tickdiff(q->tick, target) <= 0) {
        q->tick = target;
    }

    struct equeue_event *head = q->queue;
    struct equeue_event **p = &head;
    while (*p && equeue_tickdiff((*p)->target, target) <= 0) {
        p = &(*p)->next;
    }

    // split the queue at the first unexpired slot
    q->queue = *p;
    if (q->queue) {
        q->queue->ref = &q->queue;
    }

    *p = 0;

    equeue_mutex_unlock(&q->queuelock);

    // reverse and flatten each slot to match insertion order
    // (siblings were pushed LIFO in equeue_enqueue, so reversing each
    // sibling chain restores FIFO within a time slot)
    struct equeue_event **tail = &head;
    struct equeue_event *ess = head;
    while (ess) {
        struct equeue_event *es = ess;
        ess = es->next;

        struct equeue_event *prev = 0;
        for (struct equeue_event *e = es; e; e = e->sibling) {
            e->next = prev;
            prev = e;
        }

        // splice the reversed slot into the flat output list; es is now
        // the slot's last element, so its next is the new tail hook
        *tail = prev;
        tail = &es->next;
    }

    return head;
}
be_bryan 0:b74591d5ab33 344
be_bryan 0:b74591d5ab33 345 int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
be_bryan 0:b74591d5ab33 346 struct equeue_event *e = (struct equeue_event*)p - 1;
be_bryan 0:b74591d5ab33 347 unsigned tick = equeue_tick();
be_bryan 0:b74591d5ab33 348 e->cb = cb;
be_bryan 0:b74591d5ab33 349 e->target = tick + e->target;
be_bryan 0:b74591d5ab33 350
be_bryan 0:b74591d5ab33 351 int id = equeue_enqueue(q, e, tick);
be_bryan 0:b74591d5ab33 352 equeue_sema_signal(&q->eventsema);
be_bryan 0:b74591d5ab33 353 return id;
be_bryan 0:b74591d5ab33 354 }
be_bryan 0:b74591d5ab33 355
be_bryan 0:b74591d5ab33 356 void equeue_cancel(equeue_t *q, int id) {
be_bryan 0:b74591d5ab33 357 if (!id) {
be_bryan 0:b74591d5ab33 358 return;
be_bryan 0:b74591d5ab33 359 }
be_bryan 0:b74591d5ab33 360
be_bryan 0:b74591d5ab33 361 struct equeue_event *e = equeue_unqueue(q, id);
be_bryan 0:b74591d5ab33 362 if (e) {
be_bryan 0:b74591d5ab33 363 equeue_dealloc(q, e + 1);
be_bryan 0:b74591d5ab33 364 }
be_bryan 0:b74591d5ab33 365 }
be_bryan 0:b74591d5ab33 366
be_bryan 0:b74591d5ab33 367 void equeue_break(equeue_t *q) {
be_bryan 0:b74591d5ab33 368 equeue_mutex_lock(&q->queuelock);
be_bryan 0:b74591d5ab33 369 q->breaks++;
be_bryan 0:b74591d5ab33 370 equeue_mutex_unlock(&q->queuelock);
be_bryan 0:b74591d5ab33 371 equeue_sema_signal(&q->eventsema);
be_bryan 0:b74591d5ab33 372 }
be_bryan 0:b74591d5ab33 373
// Dispatch events for `ms` milliseconds, or forever when ms is
// negative. Returns after the timeout expires or equeue_break is
// called. Callbacks run without the queue lock held.
void equeue_dispatch(equeue_t *q, int ms) {
    unsigned tick = equeue_tick();
    unsigned timeout = tick + ms;
    // while we're actively dispatching, the background timer is idle
    q->background.active = false;

    while (1) {
        // collect all the available events and next deadline
        struct equeue_event *es = equeue_dequeue(q, tick);

        // dispatch events
        while (es) {
            struct equeue_event *e = es;
            es = e->next;

            // actually dispatch the callbacks
            // (cb may be 0 if the event was cancelled while in-flight)
            void (*cb)(void *) = e->cb;
            if (cb) {
                cb(e + 1);
            }

            // reenqueue periodic events or deallocate
            if (e->period >= 0) {
                e->target += e->period;
                equeue_enqueue(q, e, equeue_tick());
            } else {
                // bump id first so stale handles can't cancel the
                // recycled chunk
                equeue_incid(q, e);
                equeue_dealloc(q, e+1);
            }
        }

        // deadline of -1 means "wait indefinitely"
        int deadline = -1;
        tick = equeue_tick();

        // check if we should stop dispatching soon
        if (ms >= 0) {
            deadline = equeue_tickdiff(timeout, tick);
            if (deadline <= 0) {
                // update background timer if necessary
                // hand the earliest remaining deadline back to the
                // background timer before returning
                if (q->background.update) {
                    equeue_mutex_lock(&q->queuelock);
                    if (q->background.update && q->queue) {
                        q->background.update(q->background.timer,
                                equeue_clampdiff(q->queue->target, tick));
                    }
                    q->background.active = true;
                    equeue_mutex_unlock(&q->queuelock);
                }
                return;
            }
        }

        // find closest deadline
        equeue_mutex_lock(&q->queuelock);
        if (q->queue) {
            int diff = equeue_clampdiff(q->queue->target, tick);
            // unsigned compare makes deadline==-1 act as +infinity
            if ((unsigned)diff < (unsigned)deadline) {
                deadline = diff;
            }
        }
        equeue_mutex_unlock(&q->queuelock);

        // wait for events
        equeue_sema_wait(&q->eventsema, deadline);

        // check if we were notified to break out of dispatch
        // (cheap unlocked peek first, then confirm under the lock)
        if (q->breaks) {
            equeue_mutex_lock(&q->queuelock);
            if (q->breaks > 0) {
                q->breaks--;
                equeue_mutex_unlock(&q->queuelock);
                return;
            }
            equeue_mutex_unlock(&q->queuelock);
        }

        // update tick for next iteration
        tick = equeue_tick();
    }
}
be_bryan 0:b74591d5ab33 453
be_bryan 0:b74591d5ab33 454
be_bryan 0:b74591d5ab33 455 // event functions
be_bryan 0:b74591d5ab33 456 void equeue_event_delay(void *p, int ms) {
be_bryan 0:b74591d5ab33 457 struct equeue_event *e = (struct equeue_event*)p - 1;
be_bryan 0:b74591d5ab33 458 e->target = ms;
be_bryan 0:b74591d5ab33 459 }
be_bryan 0:b74591d5ab33 460
be_bryan 0:b74591d5ab33 461 void equeue_event_period(void *p, int ms) {
be_bryan 0:b74591d5ab33 462 struct equeue_event *e = (struct equeue_event*)p - 1;
be_bryan 0:b74591d5ab33 463 e->period = ms;
be_bryan 0:b74591d5ab33 464 }
be_bryan 0:b74591d5ab33 465
be_bryan 0:b74591d5ab33 466 void equeue_event_dtor(void *p, void (*dtor)(void *)) {
be_bryan 0:b74591d5ab33 467 struct equeue_event *e = (struct equeue_event*)p - 1;
be_bryan 0:b74591d5ab33 468 e->dtor = dtor;
be_bryan 0:b74591d5ab33 469 }
be_bryan 0:b74591d5ab33 470
be_bryan 0:b74591d5ab33 471
// simple callbacks
// Bound-callback record carried inside an event's user region by the
// equeue_call* helpers: a user function plus its opaque argument.
struct ecallback {
    void (*cb)(void*);
    void *data;
};
be_bryan 0:b74591d5ab33 477
be_bryan 0:b74591d5ab33 478 static void ecallback_dispatch(void *p) {
be_bryan 0:b74591d5ab33 479 struct ecallback *e = (struct ecallback*)p;
be_bryan 0:b74591d5ab33 480 e->cb(e->data);
be_bryan 0:b74591d5ab33 481 }
be_bryan 0:b74591d5ab33 482
be_bryan 0:b74591d5ab33 483 int equeue_call(equeue_t *q, void (*cb)(void*), void *data) {
be_bryan 0:b74591d5ab33 484 struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
be_bryan 0:b74591d5ab33 485 if (!e) {
be_bryan 0:b74591d5ab33 486 return 0;
be_bryan 0:b74591d5ab33 487 }
be_bryan 0:b74591d5ab33 488
be_bryan 0:b74591d5ab33 489 e->cb = cb;
be_bryan 0:b74591d5ab33 490 e->data = data;
be_bryan 0:b74591d5ab33 491 return equeue_post(q, ecallback_dispatch, e);
be_bryan 0:b74591d5ab33 492 }
be_bryan 0:b74591d5ab33 493
be_bryan 0:b74591d5ab33 494 int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) {
be_bryan 0:b74591d5ab33 495 struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
be_bryan 0:b74591d5ab33 496 if (!e) {
be_bryan 0:b74591d5ab33 497 return 0;
be_bryan 0:b74591d5ab33 498 }
be_bryan 0:b74591d5ab33 499
be_bryan 0:b74591d5ab33 500 equeue_event_delay(e, ms);
be_bryan 0:b74591d5ab33 501 e->cb = cb;
be_bryan 0:b74591d5ab33 502 e->data = data;
be_bryan 0:b74591d5ab33 503 return equeue_post(q, ecallback_dispatch, e);
be_bryan 0:b74591d5ab33 504 }
be_bryan 0:b74591d5ab33 505
be_bryan 0:b74591d5ab33 506 int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) {
be_bryan 0:b74591d5ab33 507 struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
be_bryan 0:b74591d5ab33 508 if (!e) {
be_bryan 0:b74591d5ab33 509 return 0;
be_bryan 0:b74591d5ab33 510 }
be_bryan 0:b74591d5ab33 511
be_bryan 0:b74591d5ab33 512 equeue_event_delay(e, ms);
be_bryan 0:b74591d5ab33 513 equeue_event_period(e, ms);
be_bryan 0:b74591d5ab33 514 e->cb = cb;
be_bryan 0:b74591d5ab33 515 e->data = data;
be_bryan 0:b74591d5ab33 516 return equeue_post(q, ecallback_dispatch, e);
be_bryan 0:b74591d5ab33 517 }
be_bryan 0:b74591d5ab33 518
be_bryan 0:b74591d5ab33 519
// backgrounding
//
// Register (or clear, with update=0) a callback that is told how many
// milliseconds remain until the next event is due, so an external
// timer can drive dispatch lazily. A reported delay of -1 means the
// timer is no longer needed.
void equeue_background(equeue_t *q,
        void (*update)(void *timer, int ms), void *timer) {
    equeue_mutex_lock(&q->queuelock);
    if (q->background.update) {
        // tell the previously registered timer it is being replaced
        q->background.update(q->background.timer, -1);
    }

    q->background.update = update;
    q->background.timer = timer;

    if (q->background.update && q->queue) {
        // prime the new timer with the current earliest deadline
        q->background.update(q->background.timer,
                equeue_clampdiff(q->queue->target, equeue_tick()));
    }
    q->background.active = true;
    equeue_mutex_unlock(&q->queuelock);
}
be_bryan 0:b74591d5ab33 538
// State for chaining one queue onto another: q is the chained queue,
// target is the queue that dispatches it, and id is the pending
// equeue_call_in event used to schedule the next dispatch (0 if none).
struct equeue_chain_context {
    equeue_t *q;
    equeue_t *target;
    int id;
};
be_bryan 0:b74591d5ab33 544
be_bryan 0:b74591d5ab33 545 static void equeue_chain_dispatch(void *p) {
be_bryan 0:b74591d5ab33 546 equeue_dispatch((equeue_t *)p, 0);
be_bryan 0:b74591d5ab33 547 }
be_bryan 0:b74591d5ab33 548
be_bryan 0:b74591d5ab33 549 static void equeue_chain_update(void *p, int ms) {
be_bryan 0:b74591d5ab33 550 struct equeue_chain_context *c = (struct equeue_chain_context *)p;
be_bryan 0:b74591d5ab33 551 equeue_cancel(c->target, c->id);
be_bryan 0:b74591d5ab33 552
be_bryan 0:b74591d5ab33 553 if (ms >= 0) {
be_bryan 0:b74591d5ab33 554 c->id = equeue_call_in(c->target, ms, equeue_chain_dispatch, c->q);
be_bryan 0:b74591d5ab33 555 } else {
be_bryan 0:b74591d5ab33 556 equeue_dealloc(c->target, c);
be_bryan 0:b74591d5ab33 557 }
be_bryan 0:b74591d5ab33 558 }
be_bryan 0:b74591d5ab33 559
be_bryan 0:b74591d5ab33 560 void equeue_chain(equeue_t *q, equeue_t *target) {
be_bryan 0:b74591d5ab33 561 if (!target) {
be_bryan 0:b74591d5ab33 562 equeue_background(q, 0, 0);
be_bryan 0:b74591d5ab33 563 return;
be_bryan 0:b74591d5ab33 564 }
be_bryan 0:b74591d5ab33 565
be_bryan 0:b74591d5ab33 566 struct equeue_chain_context *c = equeue_alloc(q,
be_bryan 0:b74591d5ab33 567 sizeof(struct equeue_chain_context));
be_bryan 0:b74591d5ab33 568
be_bryan 0:b74591d5ab33 569 c->q = q;
be_bryan 0:b74591d5ab33 570 c->target = target;
be_bryan 0:b74591d5ab33 571 c->id = 0;
be_bryan 0:b74591d5ab33 572
be_bryan 0:b74591d5ab33 573 equeue_background(q, equeue_chain_update, c);
be_bryan 0:b74591d5ab33 574 }