Note! This project has moved to github.com/armmbed/mbed-events
This repository has been superseded
This project has moved to mbed-events
Composable event loops combine the cheap synchronicity of event loops with the composability of preempted threads.
Two modular event queue classes are provided:
- EventLoop - for loops coupled with a c++ managed thread
- EventQueue - for manually managed event queues
The Event class takes advantage of the extensibility of FuncPtr to allow an event to be passed through APIs as a normal function.
More information on composable event loops.
EventQueue.cpp@6:31c141d3bcc7, 2016-04-20 (annotated)
- Committer:
- Christopher Haster
- Date:
- Wed Apr 20 23:07:47 2016 -0500
- Revision:
- 6:31c141d3bcc7
- Parent:
- 5:9963a617f952
- Child:
- 7:dcd589b578ca
Enable nested disable/enable irqs
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
Christopher Haster |
2:11cda6bead99 | 1 | #include "EventQueue.h" |
Christopher Haster |
2:11cda6bead99 | 2 | #include "Event.h" |
Christopher Haster |
0:b1b901ae3696 | 3 | |
Christopher Haster |
0:b1b901ae3696 | 4 | |
Christopher Haster |
2:11cda6bead99 | 5 | EventQueue::EventQueue() { |
Christopher Haster |
5:9963a617f952 | 6 | _queue = 0; |
Christopher Haster |
2:11cda6bead99 | 7 | _tick = 0; |
Christopher Haster |
2:11cda6bead99 | 8 | _timer.start(); |
Christopher Haster |
2:11cda6bead99 | 9 | _ticker.attach_us(this, &EventQueue::tick, (1 << 16) * 1000); |
Christopher Haster |
2:11cda6bead99 | 10 | } |
Christopher Haster |
2:11cda6bead99 | 11 | |
Christopher Haster |
2:11cda6bead99 | 12 | unsigned EventQueue::get_tick() { |
Christopher Haster |
2:11cda6bead99 | 13 | return _tick + (unsigned)_timer.read_ms(); |
Christopher Haster |
2:11cda6bead99 | 14 | } |
Christopher Haster |
2:11cda6bead99 | 15 | |
Christopher Haster |
2:11cda6bead99 | 16 | void EventQueue::tick() { |
Christopher Haster |
2:11cda6bead99 | 17 | _timer.reset(); |
Christopher Haster |
2:11cda6bead99 | 18 | _tick += 1 << 16; |
Christopher Haster |
2:11cda6bead99 | 19 | } |
Christopher Haster |
2:11cda6bead99 | 20 | |
Christopher Haster |
2:11cda6bead99 | 21 | void EventQueue::wakeup() { |
Christopher Haster |
2:11cda6bead99 | 22 | |
Christopher Haster |
2:11cda6bead99 | 23 | } |
Christopher Haster |
0:b1b901ae3696 | 24 | |
Christopher Haster |
1:2202c19570e5 | 25 | void EventQueue::dispatch(int ms) { |
Christopher Haster |
0:b1b901ae3696 | 26 | mbed::Timeout timeout; |
Christopher Haster |
2:11cda6bead99 | 27 | struct event exit; |
Christopher Haster |
2:11cda6bead99 | 28 | |
Christopher Haster |
2:11cda6bead99 | 29 | if (ms >= 0) { |
Christopher Haster |
2:11cda6bead99 | 30 | memset(&exit, 0, sizeof exit); |
Christopher Haster |
2:11cda6bead99 | 31 | event_register(&exit, ms); |
Christopher Haster |
2:11cda6bead99 | 32 | } |
Christopher Haster |
0:b1b901ae3696 | 33 | |
Christopher Haster |
0:b1b901ae3696 | 34 | while (true) { |
Christopher Haster |
0:b1b901ae3696 | 35 | while (_queue) { |
Christopher Haster |
2:11cda6bead99 | 36 | int diff = (int)(_queue->target - get_tick()); |
Christopher Haster |
2:11cda6bead99 | 37 | if (diff > 0) { |
Christopher Haster |
2:11cda6bead99 | 38 | timeout.attach_us(this, &EventQueue::wakeup, diff * 1000); |
Christopher Haster |
2:11cda6bead99 | 39 | break; |
Christopher Haster |
2:11cda6bead99 | 40 | } |
Christopher Haster |
2:11cda6bead99 | 41 | |
Christopher Haster |
2:11cda6bead99 | 42 | struct event *volatile e = _queue; |
Christopher Haster |
0:b1b901ae3696 | 43 | _queue = _queue->next; |
Christopher Haster |
2:11cda6bead99 | 44 | e->registered = false; |
Christopher Haster |
2:11cda6bead99 | 45 | |
Christopher Haster |
2:11cda6bead99 | 46 | if (e == &exit) { |
Christopher Haster |
2:11cda6bead99 | 47 | return; |
Christopher Haster |
2:11cda6bead99 | 48 | } |
Christopher Haster |
0:b1b901ae3696 | 49 | |
Christopher Haster |
2:11cda6bead99 | 50 | e->callback(e->data); |
Christopher Haster |
2:11cda6bead99 | 51 | |
Christopher Haster |
2:11cda6bead99 | 52 | if (e->period >= 0) { |
Christopher Haster |
2:11cda6bead99 | 53 | event_register(e, e->period); |
Christopher Haster |
2:11cda6bead99 | 54 | } |
Christopher Haster |
0:b1b901ae3696 | 55 | } |
Christopher Haster |
0:b1b901ae3696 | 56 | |
Christopher Haster |
0:b1b901ae3696 | 57 | __WFI(); |
Christopher Haster |
0:b1b901ae3696 | 58 | } |
Christopher Haster |
0:b1b901ae3696 | 59 | } |
Christopher Haster |
0:b1b901ae3696 | 60 | |
Christopher Haster |
2:11cda6bead99 | 61 | void EventQueue::event_register(struct event *event, int ms) { |
Christopher Haster |
2:11cda6bead99 | 62 | if (event->registered) { |
Christopher Haster |
2:11cda6bead99 | 63 | return; |
Christopher Haster |
2:11cda6bead99 | 64 | } |
Christopher Haster |
2:11cda6bead99 | 65 | |
Christopher Haster |
2:11cda6bead99 | 66 | event->target = get_tick() + (unsigned)ms; |
Christopher Haster |
2:11cda6bead99 | 67 | event->registered = true; |
Christopher Haster |
2:11cda6bead99 | 68 | |
Christopher Haster |
6:31c141d3bcc7 | 69 | uint32_t primask = __get_PRIMASK(); |
Christopher Haster |
0:b1b901ae3696 | 70 | __disable_irq(); |
Christopher Haster |
6:31c141d3bcc7 | 71 | |
Christopher Haster |
2:11cda6bead99 | 72 | struct event *volatile *p = &_queue; |
Christopher Haster |
2:11cda6bead99 | 73 | while (*p && (*p)->target < event->target) { |
Christopher Haster |
2:11cda6bead99 | 74 | p = &(*p)->next; |
Christopher Haster |
0:b1b901ae3696 | 75 | } |
Christopher Haster |
2:11cda6bead99 | 76 | |
Christopher Haster |
2:11cda6bead99 | 77 | event->next = *p; |
Christopher Haster |
2:11cda6bead99 | 78 | *p = event; |
Christopher Haster |
6:31c141d3bcc7 | 79 | |
Christopher Haster |
6:31c141d3bcc7 | 80 | if (!primask) { |
Christopher Haster |
6:31c141d3bcc7 | 81 | __enable_irq(); |
Christopher Haster |
6:31c141d3bcc7 | 82 | } |
Christopher Haster |
0:b1b901ae3696 | 83 | } |
Christopher Haster |
0:b1b901ae3696 | 84 | |
Christopher Haster |
0:b1b901ae3696 | 85 | void EventQueue::event_unregister(struct event *event) { |
Christopher Haster |
6:31c141d3bcc7 | 86 | uint32_t primask = __get_PRIMASK(); |
Christopher Haster |
0:b1b901ae3696 | 87 | __disable_irq(); |
Christopher Haster |
6:31c141d3bcc7 | 88 | |
Christopher Haster |
1:2202c19570e5 | 89 | for (struct event *volatile *p = &_queue; *p; p = &(*p)->next) { |
Christopher Haster |
0:b1b901ae3696 | 90 | if (*p == event) { |
Christopher Haster |
0:b1b901ae3696 | 91 | *p = event->next; |
Christopher Haster |
0:b1b901ae3696 | 92 | break; |
Christopher Haster |
0:b1b901ae3696 | 93 | } |
Christopher Haster |
0:b1b901ae3696 | 94 | } |
Christopher Haster |
6:31c141d3bcc7 | 95 | |
Christopher Haster |
6:31c141d3bcc7 | 96 | if (!primask) { |
Christopher Haster |
6:31c141d3bcc7 | 97 | __enable_irq(); |
Christopher Haster |
6:31c141d3bcc7 | 98 | } |
Christopher Haster |
0:b1b901ae3696 | 99 | } |
Christopher Haster |
0:b1b901ae3696 | 100 |