Note! This project has moved to github.com/armmbed/mbed-events
This repository has been superseded
This project has moved to mbed-events (github.com/armmbed/mbed-events)
Composable event loops combine the cheap synchronicity of event loops with the composability of preempted threads.
Two modular event queue classes are provided:
- EventLoop - for loops coupled with a c++ managed thread
- EventQueue - for manually managed event queues
The Event class takes advantage of the extensibility of FuncPtr to allow an event to be passed through APIs as a normal function.
More information on composable event loops is available in the project's documentation.
EventQueue.cpp@15:92d7c0b8a0f5, 2016-05-10 (annotated)
- Committer:
- Christopher Haster
- Date:
- Tue May 10 09:23:27 2016 -0500
- Revision:
- 15:92d7c0b8a0f5
- Parent:
- 14:5abf2ccf2dbf
- Child:
- 16:ff5d48fcce1b
Abstracted out irq primitives
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
Christopher Haster |
2:11cda6bead99 | 1 | #include "EventQueue.h" |
Christopher Haster |
2:11cda6bead99 | 2 | #include "Event.h" |
Christopher Haster |
10:62767e708bb6 | 3 | #include "mbed.h" |
Christopher Haster |
0:b1b901ae3696 | 4 | |
Christopher Haster |
9:2b0910397844 | 5 | #ifndef EVENTS_NO_RTOS |
Christopher Haster |
9:2b0910397844 | 6 | #include "rtos.h" |
Christopher Haster |
9:2b0910397844 | 7 | #endif |
Christopher Haster |
9:2b0910397844 | 8 | |
Christopher Haster |
0:b1b901ae3696 | 9 | |
Christopher Haster |
15:92d7c0b8a0f5 | 10 | // Platform specific definitions |
Christopher Haster |
15:92d7c0b8a0f5 | 11 | static inline unsigned irq_disable() { |
Christopher Haster |
15:92d7c0b8a0f5 | 12 | unsigned primask = __get_PRIMASK(); |
Christopher Haster |
15:92d7c0b8a0f5 | 13 | __disable_irq(); |
Christopher Haster |
15:92d7c0b8a0f5 | 14 | return primask; |
Christopher Haster |
15:92d7c0b8a0f5 | 15 | } |
Christopher Haster |
14:5abf2ccf2dbf | 16 | |
Christopher Haster |
15:92d7c0b8a0f5 | 17 | static inline void irq_enable(unsigned primask) { |
Christopher Haster |
15:92d7c0b8a0f5 | 18 | __set_PRIMASK(primask); |
Christopher Haster |
15:92d7c0b8a0f5 | 19 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 20 | |
Christopher Haster |
15:92d7c0b8a0f5 | 21 | static inline void irq_wfi() { |
Christopher Haster |
15:92d7c0b8a0f5 | 22 | #ifndef EVENTS_NO_RTOS |
Christopher Haster |
15:92d7c0b8a0f5 | 23 | Thread::yield(); |
Christopher Haster |
15:92d7c0b8a0f5 | 24 | #else |
Christopher Haster |
15:92d7c0b8a0f5 | 25 | __WFI(); |
Christopher Haster |
15:92d7c0b8a0f5 | 26 | #endif |
Christopher Haster |
15:92d7c0b8a0f5 | 27 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 28 | |
Christopher Haster |
15:92d7c0b8a0f5 | 29 | |
Christopher Haster |
15:92d7c0b8a0f5 | 30 | // Event queue definitions |
Christopher Haster |
14:5abf2ccf2dbf | 31 | EventQueue::EventQueue(unsigned event_count, unsigned event_context) { |
Christopher Haster |
14:5abf2ccf2dbf | 32 | _event_context = sizeof(FuncPtr<void()>) + event_context; |
Christopher Haster |
14:5abf2ccf2dbf | 33 | unsigned event_size = sizeof(struct event) + _event_context; |
Christopher Haster |
14:5abf2ccf2dbf | 34 | _mem = malloc(event_count * event_size); |
Christopher Haster |
14:5abf2ccf2dbf | 35 | _free = (struct event*)_mem; |
Christopher Haster |
14:5abf2ccf2dbf | 36 | |
Christopher Haster |
14:5abf2ccf2dbf | 37 | if (_mem) { |
Christopher Haster |
14:5abf2ccf2dbf | 38 | for (unsigned i = 0; i < event_count-1; i++) { |
Christopher Haster |
14:5abf2ccf2dbf | 39 | ((struct event*)((char*)_mem + i*event_size))->next = |
Christopher Haster |
14:5abf2ccf2dbf | 40 | (struct event*)((char*)_mem + (i+1)*event_size); |
Christopher Haster |
14:5abf2ccf2dbf | 41 | } |
Christopher Haster |
14:5abf2ccf2dbf | 42 | ((struct event*)((char*)_mem + (event_count-1)*event_size))->next = 0; |
Christopher Haster |
14:5abf2ccf2dbf | 43 | } |
Christopher Haster |
14:5abf2ccf2dbf | 44 | |
Christopher Haster |
5:9963a617f952 | 45 | _queue = 0; |
Christopher Haster |
2:11cda6bead99 | 46 | _tick = 0; |
Christopher Haster |
2:11cda6bead99 | 47 | _timer.start(); |
Christopher Haster |
2:11cda6bead99 | 48 | _ticker.attach_us(this, &EventQueue::tick, (1 << 16) * 1000); |
Christopher Haster |
2:11cda6bead99 | 49 | } |
Christopher Haster |
2:11cda6bead99 | 50 | |
Christopher Haster |
14:5abf2ccf2dbf | 51 | EventQueue::~EventQueue() { |
Christopher Haster |
14:5abf2ccf2dbf | 52 | free(_mem); |
Christopher Haster |
14:5abf2ccf2dbf | 53 | } |
Christopher Haster |
14:5abf2ccf2dbf | 54 | |
Christopher Haster |
2:11cda6bead99 | 55 | unsigned EventQueue::get_tick() { |
Christopher Haster |
2:11cda6bead99 | 56 | return _tick + (unsigned)_timer.read_ms(); |
Christopher Haster |
2:11cda6bead99 | 57 | } |
Christopher Haster |
2:11cda6bead99 | 58 | |
Christopher Haster |
2:11cda6bead99 | 59 | void EventQueue::tick() { |
Christopher Haster |
2:11cda6bead99 | 60 | _timer.reset(); |
Christopher Haster |
2:11cda6bead99 | 61 | _tick += 1 << 16; |
Christopher Haster |
2:11cda6bead99 | 62 | } |
Christopher Haster |
2:11cda6bead99 | 63 | |
Christopher Haster |
2:11cda6bead99 | 64 | void EventQueue::wakeup() { |
Christopher Haster |
2:11cda6bead99 | 65 | |
Christopher Haster |
2:11cda6bead99 | 66 | } |
Christopher Haster |
0:b1b901ae3696 | 67 | |
Christopher Haster |
15:92d7c0b8a0f5 | 68 | void EventQueue::exit(volatile bool *exit) { |
Christopher Haster |
15:92d7c0b8a0f5 | 69 | *exit = true; |
Christopher Haster |
15:92d7c0b8a0f5 | 70 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 71 | |
Christopher Haster |
1:2202c19570e5 | 72 | void EventQueue::dispatch(int ms) { |
Christopher Haster |
10:62767e708bb6 | 73 | Timeout timeout; |
Christopher Haster |
15:92d7c0b8a0f5 | 74 | volatile bool exit = false; |
Christopher Haster |
2:11cda6bead99 | 75 | |
Christopher Haster |
2:11cda6bead99 | 76 | if (ms >= 0) { |
Christopher Haster |
15:92d7c0b8a0f5 | 77 | Event<void(volatile bool*)> e(this, this, &EventQueue::exit); |
Christopher Haster |
15:92d7c0b8a0f5 | 78 | e.delay(ms); |
Christopher Haster |
15:92d7c0b8a0f5 | 79 | e.trigger(&exit); |
Christopher Haster |
2:11cda6bead99 | 80 | } |
Christopher Haster |
0:b1b901ae3696 | 81 | |
Christopher Haster |
0:b1b901ae3696 | 82 | while (true) { |
Christopher Haster |
0:b1b901ae3696 | 83 | while (_queue) { |
Christopher Haster |
2:11cda6bead99 | 84 | int diff = (int)(_queue->target - get_tick()); |
Christopher Haster |
2:11cda6bead99 | 85 | if (diff > 0) { |
Christopher Haster |
2:11cda6bead99 | 86 | timeout.attach_us(this, &EventQueue::wakeup, diff * 1000); |
Christopher Haster |
2:11cda6bead99 | 87 | break; |
Christopher Haster |
2:11cda6bead99 | 88 | } |
Christopher Haster |
2:11cda6bead99 | 89 | |
Christopher Haster |
2:11cda6bead99 | 90 | struct event *volatile e = _queue; |
Christopher Haster |
0:b1b901ae3696 | 91 | _queue = _queue->next; |
Christopher Haster |
2:11cda6bead99 | 92 | |
Christopher Haster |
14:5abf2ccf2dbf | 93 | e->dispatch(reinterpret_cast<void *>(e + 1)); |
Christopher Haster |
2:11cda6bead99 | 94 | |
Christopher Haster |
2:11cda6bead99 | 95 | if (e->period >= 0) { |
Christopher Haster |
14:5abf2ccf2dbf | 96 | trigger(e, e->period); |
Christopher Haster |
14:5abf2ccf2dbf | 97 | } else { |
Christopher Haster |
14:5abf2ccf2dbf | 98 | dealloc(e); |
Christopher Haster |
2:11cda6bead99 | 99 | } |
Christopher Haster |
0:b1b901ae3696 | 100 | } |
Christopher Haster |
0:b1b901ae3696 | 101 | |
Christopher Haster |
15:92d7c0b8a0f5 | 102 | if (exit) { |
Christopher Haster |
15:92d7c0b8a0f5 | 103 | break; |
Christopher Haster |
15:92d7c0b8a0f5 | 104 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 105 | |
Christopher Haster |
15:92d7c0b8a0f5 | 106 | irq_wfi(); |
Christopher Haster |
0:b1b901ae3696 | 107 | } |
Christopher Haster |
0:b1b901ae3696 | 108 | } |
Christopher Haster |
0:b1b901ae3696 | 109 | |
Christopher Haster |
14:5abf2ccf2dbf | 110 | void EventQueue::trigger(struct event *e, int delay) { |
Christopher Haster |
14:5abf2ccf2dbf | 111 | e->target = get_tick() + (unsigned)delay; |
Christopher Haster |
2:11cda6bead99 | 112 | |
Christopher Haster |
15:92d7c0b8a0f5 | 113 | unsigned primask = irq_disable(); |
Christopher Haster |
2:11cda6bead99 | 114 | struct event *volatile *p = &_queue; |
Christopher Haster |
14:5abf2ccf2dbf | 115 | while (*p && (*p)->target < e->target) { |
Christopher Haster |
2:11cda6bead99 | 116 | p = &(*p)->next; |
Christopher Haster |
0:b1b901ae3696 | 117 | } |
Christopher Haster |
2:11cda6bead99 | 118 | |
Christopher Haster |
14:5abf2ccf2dbf | 119 | e->next = *p; |
Christopher Haster |
14:5abf2ccf2dbf | 120 | *p = e; |
Christopher Haster |
15:92d7c0b8a0f5 | 121 | irq_enable(primask); |
Christopher Haster |
0:b1b901ae3696 | 122 | } |
Christopher Haster |
0:b1b901ae3696 | 123 | |
Christopher Haster |
14:5abf2ccf2dbf | 124 | EventQueue::event *EventQueue::alloc(unsigned size, int ms) { |
Christopher Haster |
14:5abf2ccf2dbf | 125 | if (size > _event_context) { |
Christopher Haster |
14:5abf2ccf2dbf | 126 | return 0; |
Christopher Haster |
14:5abf2ccf2dbf | 127 | } |
Christopher Haster |
14:5abf2ccf2dbf | 128 | |
Christopher Haster |
14:5abf2ccf2dbf | 129 | struct event *e; |
Christopher Haster |
14:5abf2ccf2dbf | 130 | |
Christopher Haster |
15:92d7c0b8a0f5 | 131 | unsigned primask = irq_disable(); |
Christopher Haster |
14:5abf2ccf2dbf | 132 | e = _free; |
Christopher Haster |
14:5abf2ccf2dbf | 133 | if (e) { |
Christopher Haster |
14:5abf2ccf2dbf | 134 | _free = e->next; |
Christopher Haster |
0:b1b901ae3696 | 135 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 136 | irq_enable(primask); |
Christopher Haster |
14:5abf2ccf2dbf | 137 | |
Christopher Haster |
14:5abf2ccf2dbf | 138 | return e; |
Christopher Haster |
0:b1b901ae3696 | 139 | } |
Christopher Haster |
0:b1b901ae3696 | 140 | |
Christopher Haster |
14:5abf2ccf2dbf | 141 | void EventQueue::dealloc(struct event *e) { |
Christopher Haster |
15:92d7c0b8a0f5 | 142 | unsigned primask = irq_disable(); |
Christopher Haster |
14:5abf2ccf2dbf | 143 | e->next = _free; |
Christopher Haster |
14:5abf2ccf2dbf | 144 | _free = e; |
Christopher Haster |
15:92d7c0b8a0f5 | 145 | irq_enable(primask); |
Christopher Haster |
14:5abf2ccf2dbf | 146 | } |
Christopher Haster |
14:5abf2ccf2dbf | 147 |