Note! This project has moved to github.com/armmbed/mbed-events
This repository has been superseded
This project has moved to mbed-events
Composable event loops combine the cheap synchronicity of event loops with the composability of preempted threads.
Two modular event queue classes are provided:
- EventLoop - for loops coupled with a c++ managed thread
- EventQueue - for manually managed event queues
The Event class takes advantage of the extensibility of FuncPtr to allow an event to be passed through APIs as a normal function.
More information on composable event loops.
EventQueue.cpp@17:6d564266850e, 2016-04-18 (annotated)
- Committer:
- Christopher Haster
- Date:
- Mon Apr 18 12:51:04 2016 -0500
- Revision:
- 17:6d564266850e
- Parent:
- 16:ff5d48fcce1b
- Child:
- 20:2f9d9c53a5af
Add convenience trigger functions
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
Christopher Haster |
2:11cda6bead99 | 1 | #include "EventQueue.h" |
Christopher Haster |
2:11cda6bead99 | 2 | #include "Event.h" |
Christopher Haster |
10:62767e708bb6 | 3 | #include "mbed.h" |
Christopher Haster |
9:2b0910397844 | 4 | #include "rtos.h" |
Christopher Haster |
9:2b0910397844 | 5 | |
Christopher Haster |
0:b1b901ae3696 | 6 | |
Christopher Haster |
15:92d7c0b8a0f5 | 7 | // Platform specific definitions |
Christopher Haster |
15:92d7c0b8a0f5 | 8 | static inline unsigned irq_disable() { |
Christopher Haster |
15:92d7c0b8a0f5 | 9 | unsigned primask = __get_PRIMASK(); |
Christopher Haster |
15:92d7c0b8a0f5 | 10 | __disable_irq(); |
Christopher Haster |
15:92d7c0b8a0f5 | 11 | return primask; |
Christopher Haster |
15:92d7c0b8a0f5 | 12 | } |
Christopher Haster |
14:5abf2ccf2dbf | 13 | |
Christopher Haster |
15:92d7c0b8a0f5 | 14 | static inline void irq_enable(unsigned primask) { |
Christopher Haster |
15:92d7c0b8a0f5 | 15 | __set_PRIMASK(primask); |
Christopher Haster |
15:92d7c0b8a0f5 | 16 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 17 | |
Christopher Haster |
15:92d7c0b8a0f5 | 18 | |
Christopher Haster |
15:92d7c0b8a0f5 | 19 | // Event queue definitions |
Christopher Haster |
14:5abf2ccf2dbf | 20 | EventQueue::EventQueue(unsigned event_count, unsigned event_context) { |
Christopher Haster |
14:5abf2ccf2dbf | 21 | _event_context = sizeof(FuncPtr<void()>) + event_context; |
Christopher Haster |
14:5abf2ccf2dbf | 22 | unsigned event_size = sizeof(struct event) + _event_context; |
Christopher Haster |
14:5abf2ccf2dbf | 23 | _mem = malloc(event_count * event_size); |
Christopher Haster |
14:5abf2ccf2dbf | 24 | _free = (struct event*)_mem; |
Christopher Haster |
14:5abf2ccf2dbf | 25 | |
Christopher Haster |
14:5abf2ccf2dbf | 26 | if (_mem) { |
Christopher Haster |
14:5abf2ccf2dbf | 27 | for (unsigned i = 0; i < event_count-1; i++) { |
Christopher Haster |
14:5abf2ccf2dbf | 28 | ((struct event*)((char*)_mem + i*event_size))->next = |
Christopher Haster |
14:5abf2ccf2dbf | 29 | (struct event*)((char*)_mem + (i+1)*event_size); |
Christopher Haster |
14:5abf2ccf2dbf | 30 | } |
Christopher Haster |
14:5abf2ccf2dbf | 31 | ((struct event*)((char*)_mem + (event_count-1)*event_size))->next = 0; |
Christopher Haster |
14:5abf2ccf2dbf | 32 | } |
Christopher Haster |
14:5abf2ccf2dbf | 33 | |
Christopher Haster |
5:9963a617f952 | 34 | _queue = 0; |
Christopher Haster |
2:11cda6bead99 | 35 | _tick = 0; |
Christopher Haster |
2:11cda6bead99 | 36 | _timer.start(); |
Christopher Haster |
2:11cda6bead99 | 37 | _ticker.attach_us(this, &EventQueue::tick, (1 << 16) * 1000); |
Christopher Haster |
2:11cda6bead99 | 38 | } |
Christopher Haster |
2:11cda6bead99 | 39 | |
Christopher Haster |
14:5abf2ccf2dbf | 40 | EventQueue::~EventQueue() { |
Christopher Haster |
14:5abf2ccf2dbf | 41 | free(_mem); |
Christopher Haster |
14:5abf2ccf2dbf | 42 | } |
Christopher Haster |
14:5abf2ccf2dbf | 43 | |
Christopher Haster |
2:11cda6bead99 | 44 | unsigned EventQueue::get_tick() { |
Christopher Haster |
2:11cda6bead99 | 45 | return _tick + (unsigned)_timer.read_ms(); |
Christopher Haster |
2:11cda6bead99 | 46 | } |
Christopher Haster |
2:11cda6bead99 | 47 | |
Christopher Haster |
16:ff5d48fcce1b | 48 | bool EventQueue::past_tick(unsigned tick) { |
Christopher Haster |
16:ff5d48fcce1b | 49 | return static_cast<int>(tick - get_tick()) <= 0; |
Christopher Haster |
16:ff5d48fcce1b | 50 | } |
Christopher Haster |
16:ff5d48fcce1b | 51 | |
Christopher Haster |
2:11cda6bead99 | 52 | void EventQueue::tick() { |
Christopher Haster |
2:11cda6bead99 | 53 | _timer.reset(); |
Christopher Haster |
2:11cda6bead99 | 54 | _tick += 1 << 16; |
Christopher Haster |
2:11cda6bead99 | 55 | } |
Christopher Haster |
2:11cda6bead99 | 56 | |
Christopher Haster |
1:2202c19570e5 | 57 | void EventQueue::dispatch(int ms) { |
Christopher Haster |
16:ff5d48fcce1b | 58 | unsigned target = get_tick() + (unsigned)ms; |
Christopher Haster |
0:b1b901ae3696 | 59 | |
Christopher Haster |
0:b1b901ae3696 | 60 | while (true) { |
Christopher Haster |
0:b1b901ae3696 | 61 | while (_queue) { |
Christopher Haster |
16:ff5d48fcce1b | 62 | if (!past_tick(_queue->target)) { |
Christopher Haster |
2:11cda6bead99 | 63 | break; |
Christopher Haster |
2:11cda6bead99 | 64 | } |
Christopher Haster |
2:11cda6bead99 | 65 | |
Christopher Haster |
16:ff5d48fcce1b | 66 | unsigned primask = irq_disable(); |
Christopher Haster |
2:11cda6bead99 | 67 | struct event *volatile e = _queue; |
Christopher Haster |
0:b1b901ae3696 | 68 | _queue = _queue->next; |
Christopher Haster |
16:ff5d48fcce1b | 69 | irq_enable(primask); |
Christopher Haster |
2:11cda6bead99 | 70 | |
Christopher Haster |
14:5abf2ccf2dbf | 71 | e->dispatch(reinterpret_cast<void *>(e + 1)); |
Christopher Haster |
2:11cda6bead99 | 72 | |
Christopher Haster |
2:11cda6bead99 | 73 | if (e->period >= 0) { |
Christopher Haster |
17:6d564266850e | 74 | event_trigger(e, e->period); |
Christopher Haster |
14:5abf2ccf2dbf | 75 | } else { |
Christopher Haster |
17:6d564266850e | 76 | event_dealloc(e); |
Christopher Haster |
2:11cda6bead99 | 77 | } |
Christopher Haster |
0:b1b901ae3696 | 78 | } |
Christopher Haster |
0:b1b901ae3696 | 79 | |
Christopher Haster |
16:ff5d48fcce1b | 80 | if (ms >= 0 && past_tick(target)) { |
Christopher Haster |
16:ff5d48fcce1b | 81 | return; |
Christopher Haster |
15:92d7c0b8a0f5 | 82 | } |
Christopher Haster |
15:92d7c0b8a0f5 | 83 | |
Christopher Haster |
16:ff5d48fcce1b | 84 | osStatus status = Thread::yield(); |
Christopher Haster |
16:ff5d48fcce1b | 85 | if (status != osOK) { |
Christopher Haster |
16:ff5d48fcce1b | 86 | return; |
Christopher Haster |
16:ff5d48fcce1b | 87 | } |
Christopher Haster |
0:b1b901ae3696 | 88 | } |
Christopher Haster |
0:b1b901ae3696 | 89 | } |
Christopher Haster |
0:b1b901ae3696 | 90 | |
Christopher Haster |
17:6d564266850e | 91 | void EventQueue::event_trigger(struct event *e, int delay) { |
Christopher Haster |
14:5abf2ccf2dbf | 92 | e->target = get_tick() + (unsigned)delay; |
Christopher Haster |
2:11cda6bead99 | 93 | |
Christopher Haster |
15:92d7c0b8a0f5 | 94 | unsigned primask = irq_disable(); |
Christopher Haster |
2:11cda6bead99 | 95 | struct event *volatile *p = &_queue; |
Christopher Haster |
14:5abf2ccf2dbf | 96 | while (*p && (*p)->target < e->target) { |
Christopher Haster |
2:11cda6bead99 | 97 | p = &(*p)->next; |
Christopher Haster |
0:b1b901ae3696 | 98 | } |
Christopher Haster |
2:11cda6bead99 | 99 | |
Christopher Haster |
14:5abf2ccf2dbf | 100 | e->next = *p; |
Christopher Haster |
14:5abf2ccf2dbf | 101 | *p = e; |
Christopher Haster |
15:92d7c0b8a0f5 | 102 | irq_enable(primask); |
Christopher Haster |
0:b1b901ae3696 | 103 | } |
Christopher Haster |
0:b1b901ae3696 | 104 | |
Christopher Haster |
17:6d564266850e | 105 | EventQueue::event *EventQueue::event_alloc(unsigned size, int ms) { |
Christopher Haster |
14:5abf2ccf2dbf | 106 | if (size > _event_context) { |
Christopher Haster |
14:5abf2ccf2dbf | 107 | return 0; |
Christopher Haster |
14:5abf2ccf2dbf | 108 | } |
Christopher Haster |
14:5abf2ccf2dbf | 109 | |
Christopher Haster |
16:ff5d48fcce1b | 110 | unsigned target = get_tick() + (unsigned)ms; |
Christopher Haster |
14:5abf2ccf2dbf | 111 | struct event *e; |
Christopher Haster |
14:5abf2ccf2dbf | 112 | |
Christopher Haster |
16:ff5d48fcce1b | 113 | while (true) { |
Christopher Haster |
16:ff5d48fcce1b | 114 | if (_free) { |
Christopher Haster |
16:ff5d48fcce1b | 115 | unsigned primask = irq_disable(); |
Christopher Haster |
16:ff5d48fcce1b | 116 | if (_free) { |
Christopher Haster |
16:ff5d48fcce1b | 117 | e = _free; |
Christopher Haster |
16:ff5d48fcce1b | 118 | _free = _free->next; |
Christopher Haster |
16:ff5d48fcce1b | 119 | } |
Christopher Haster |
16:ff5d48fcce1b | 120 | irq_enable(primask); |
Christopher Haster |
16:ff5d48fcce1b | 121 | } |
Christopher Haster |
16:ff5d48fcce1b | 122 | |
Christopher Haster |
16:ff5d48fcce1b | 123 | if (e || (ms >= 0 && past_tick(target))) { |
Christopher Haster |
16:ff5d48fcce1b | 124 | return e; |
Christopher Haster |
16:ff5d48fcce1b | 125 | } |
Christopher Haster |
16:ff5d48fcce1b | 126 | |
Christopher Haster |
16:ff5d48fcce1b | 127 | osStatus status = Thread::yield(); |
Christopher Haster |
16:ff5d48fcce1b | 128 | if (status != osOK) { |
Christopher Haster |
16:ff5d48fcce1b | 129 | return e; |
Christopher Haster |
16:ff5d48fcce1b | 130 | } |
Christopher Haster |
0:b1b901ae3696 | 131 | } |
Christopher Haster |
0:b1b901ae3696 | 132 | } |
Christopher Haster |
0:b1b901ae3696 | 133 | |
Christopher Haster |
17:6d564266850e | 134 | void EventQueue::event_dealloc(struct event *e) { |
Christopher Haster |
15:92d7c0b8a0f5 | 135 | unsigned primask = irq_disable(); |
Christopher Haster |
14:5abf2ccf2dbf | 136 | e->next = _free; |
Christopher Haster |
14:5abf2ccf2dbf | 137 | _free = e; |
Christopher Haster |
15:92d7c0b8a0f5 | 138 | irq_enable(primask); |
Christopher Haster |
14:5abf2ccf2dbf | 139 | } |
Christopher Haster |
14:5abf2ccf2dbf | 140 |