Note! This project has moved to github.com/armmbed/mbed-events
This repository has been superseded
This project has moved to mbed-events
Composable event loops combine the cheap synchronicity of event loops with the composability of preempted threads.
Two modular event queue classes are provided:
- EventLoop - for loops coupled with a C++-managed thread
- EventQueue - for manually managed event queues
The Event class takes advantage of the extensibility of FuncPtr to allow an event to be passed through APIs as a normal function.
More information on composable event loops is available in the project documentation.
EventQueue.cpp@14:5abf2ccf2dbf, 2016-05-10 (annotated)
- Committer:
- Christopher Haster
- Date:
- Tue May 10 07:51:44 2016 -0500
- Revision:
- 14:5abf2ccf2dbf
- Parent:
- 10:62767e708bb6
- Child:
- 15:92d7c0b8a0f5
Move to internal memory management
Allows an event to be enqueued multiple times before being handled.
Downside is cancel is non-trivial to support.
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
Christopher Haster |
2:11cda6bead99 | 1 | #include "EventQueue.h" |
Christopher Haster |
2:11cda6bead99 | 2 | #include "Event.h" |
Christopher Haster |
10:62767e708bb6 | 3 | #include "mbed.h" |
Christopher Haster |
0:b1b901ae3696 | 4 | |
Christopher Haster |
9:2b0910397844 | 5 | #ifndef EVENTS_NO_RTOS |
Christopher Haster |
9:2b0910397844 | 6 | #include "rtos.h" |
Christopher Haster |
9:2b0910397844 | 7 | #endif |
Christopher Haster |
9:2b0910397844 | 8 | |
Christopher Haster |
0:b1b901ae3696 | 9 | |
Christopher Haster |
14:5abf2ccf2dbf | 10 | |
Christopher Haster |
14:5abf2ccf2dbf | 11 | EventQueue::EventQueue(unsigned event_count, unsigned event_context) { |
Christopher Haster |
14:5abf2ccf2dbf | 12 | _event_context = sizeof(FuncPtr<void()>) + event_context; |
Christopher Haster |
14:5abf2ccf2dbf | 13 | unsigned event_size = sizeof(struct event) + _event_context; |
Christopher Haster |
14:5abf2ccf2dbf | 14 | _mem = malloc(event_count * event_size); |
Christopher Haster |
14:5abf2ccf2dbf | 15 | _free = (struct event*)_mem; |
Christopher Haster |
14:5abf2ccf2dbf | 16 | |
Christopher Haster |
14:5abf2ccf2dbf | 17 | if (_mem) { |
Christopher Haster |
14:5abf2ccf2dbf | 18 | for (unsigned i = 0; i < event_count-1; i++) { |
Christopher Haster |
14:5abf2ccf2dbf | 19 | ((struct event*)((char*)_mem + i*event_size))->next = |
Christopher Haster |
14:5abf2ccf2dbf | 20 | (struct event*)((char*)_mem + (i+1)*event_size); |
Christopher Haster |
14:5abf2ccf2dbf | 21 | } |
Christopher Haster |
14:5abf2ccf2dbf | 22 | ((struct event*)((char*)_mem + (event_count-1)*event_size))->next = 0; |
Christopher Haster |
14:5abf2ccf2dbf | 23 | } |
Christopher Haster |
14:5abf2ccf2dbf | 24 | |
Christopher Haster |
5:9963a617f952 | 25 | _queue = 0; |
Christopher Haster |
2:11cda6bead99 | 26 | _tick = 0; |
Christopher Haster |
2:11cda6bead99 | 27 | _timer.start(); |
Christopher Haster |
2:11cda6bead99 | 28 | _ticker.attach_us(this, &EventQueue::tick, (1 << 16) * 1000); |
Christopher Haster |
2:11cda6bead99 | 29 | } |
Christopher Haster |
2:11cda6bead99 | 30 | |
Christopher Haster |
14:5abf2ccf2dbf | 31 | EventQueue::~EventQueue() { |
Christopher Haster |
14:5abf2ccf2dbf | 32 | free(_mem); |
Christopher Haster |
14:5abf2ccf2dbf | 33 | } |
Christopher Haster |
14:5abf2ccf2dbf | 34 | |
Christopher Haster |
2:11cda6bead99 | 35 | unsigned EventQueue::get_tick() { |
Christopher Haster |
2:11cda6bead99 | 36 | return _tick + (unsigned)_timer.read_ms(); |
Christopher Haster |
2:11cda6bead99 | 37 | } |
Christopher Haster |
2:11cda6bead99 | 38 | |
Christopher Haster |
2:11cda6bead99 | 39 | void EventQueue::tick() { |
Christopher Haster |
2:11cda6bead99 | 40 | _timer.reset(); |
Christopher Haster |
2:11cda6bead99 | 41 | _tick += 1 << 16; |
Christopher Haster |
2:11cda6bead99 | 42 | } |
Christopher Haster |
2:11cda6bead99 | 43 | |
Christopher Haster |
2:11cda6bead99 | 44 | void EventQueue::wakeup() { |
Christopher Haster |
2:11cda6bead99 | 45 | |
Christopher Haster |
2:11cda6bead99 | 46 | } |
Christopher Haster |
0:b1b901ae3696 | 47 | |
Christopher Haster |
1:2202c19570e5 | 48 | void EventQueue::dispatch(int ms) { |
Christopher Haster |
10:62767e708bb6 | 49 | Timeout timeout; |
Christopher Haster |
2:11cda6bead99 | 50 | struct event exit; |
Christopher Haster |
2:11cda6bead99 | 51 | |
Christopher Haster |
2:11cda6bead99 | 52 | if (ms >= 0) { |
Christopher Haster |
14:5abf2ccf2dbf | 53 | trigger(&exit, ms); |
Christopher Haster |
2:11cda6bead99 | 54 | } |
Christopher Haster |
0:b1b901ae3696 | 55 | |
Christopher Haster |
0:b1b901ae3696 | 56 | while (true) { |
Christopher Haster |
0:b1b901ae3696 | 57 | while (_queue) { |
Christopher Haster |
2:11cda6bead99 | 58 | int diff = (int)(_queue->target - get_tick()); |
Christopher Haster |
2:11cda6bead99 | 59 | if (diff > 0) { |
Christopher Haster |
2:11cda6bead99 | 60 | timeout.attach_us(this, &EventQueue::wakeup, diff * 1000); |
Christopher Haster |
2:11cda6bead99 | 61 | break; |
Christopher Haster |
2:11cda6bead99 | 62 | } |
Christopher Haster |
2:11cda6bead99 | 63 | |
Christopher Haster |
2:11cda6bead99 | 64 | struct event *volatile e = _queue; |
Christopher Haster |
0:b1b901ae3696 | 65 | _queue = _queue->next; |
Christopher Haster |
2:11cda6bead99 | 66 | |
Christopher Haster |
2:11cda6bead99 | 67 | if (e == &exit) { |
Christopher Haster |
2:11cda6bead99 | 68 | return; |
Christopher Haster |
2:11cda6bead99 | 69 | } |
Christopher Haster |
0:b1b901ae3696 | 70 | |
Christopher Haster |
14:5abf2ccf2dbf | 71 | e->dispatch(reinterpret_cast<void *>(e + 1)); |
Christopher Haster |
2:11cda6bead99 | 72 | |
Christopher Haster |
2:11cda6bead99 | 73 | if (e->period >= 0) { |
Christopher Haster |
14:5abf2ccf2dbf | 74 | trigger(e, e->period); |
Christopher Haster |
14:5abf2ccf2dbf | 75 | } else { |
Christopher Haster |
14:5abf2ccf2dbf | 76 | dealloc(e); |
Christopher Haster |
2:11cda6bead99 | 77 | } |
Christopher Haster |
0:b1b901ae3696 | 78 | } |
Christopher Haster |
0:b1b901ae3696 | 79 | |
Christopher Haster |
9:2b0910397844 | 80 | #ifndef EVENTS_NO_RTOS |
Christopher Haster |
9:2b0910397844 | 81 | Thread::yield(); |
Christopher Haster |
9:2b0910397844 | 82 | #else |
Christopher Haster |
0:b1b901ae3696 | 83 | __WFI(); |
Christopher Haster |
9:2b0910397844 | 84 | #endif |
Christopher Haster |
0:b1b901ae3696 | 85 | } |
Christopher Haster |
0:b1b901ae3696 | 86 | } |
Christopher Haster |
0:b1b901ae3696 | 87 | |
Christopher Haster |
14:5abf2ccf2dbf | 88 | void EventQueue::trigger(struct event *e, int delay) { |
Christopher Haster |
14:5abf2ccf2dbf | 89 | e->target = get_tick() + (unsigned)delay; |
Christopher Haster |
2:11cda6bead99 | 90 | |
Christopher Haster |
6:31c141d3bcc7 | 91 | uint32_t primask = __get_PRIMASK(); |
Christopher Haster |
0:b1b901ae3696 | 92 | __disable_irq(); |
Christopher Haster |
6:31c141d3bcc7 | 93 | |
Christopher Haster |
2:11cda6bead99 | 94 | struct event *volatile *p = &_queue; |
Christopher Haster |
14:5abf2ccf2dbf | 95 | while (*p && (*p)->target < e->target) { |
Christopher Haster |
2:11cda6bead99 | 96 | p = &(*p)->next; |
Christopher Haster |
0:b1b901ae3696 | 97 | } |
Christopher Haster |
2:11cda6bead99 | 98 | |
Christopher Haster |
14:5abf2ccf2dbf | 99 | e->next = *p; |
Christopher Haster |
14:5abf2ccf2dbf | 100 | *p = e; |
Christopher Haster |
6:31c141d3bcc7 | 101 | |
Christopher Haster |
7:dcd589b578ca | 102 | __set_PRIMASK(primask); |
Christopher Haster |
0:b1b901ae3696 | 103 | } |
Christopher Haster |
0:b1b901ae3696 | 104 | |
Christopher Haster |
14:5abf2ccf2dbf | 105 | EventQueue::event *EventQueue::alloc(unsigned size, int ms) { |
Christopher Haster |
14:5abf2ccf2dbf | 106 | if (size > _event_context) { |
Christopher Haster |
14:5abf2ccf2dbf | 107 | return 0; |
Christopher Haster |
14:5abf2ccf2dbf | 108 | } |
Christopher Haster |
14:5abf2ccf2dbf | 109 | |
Christopher Haster |
14:5abf2ccf2dbf | 110 | struct event *e; |
Christopher Haster |
14:5abf2ccf2dbf | 111 | |
Christopher Haster |
6:31c141d3bcc7 | 112 | uint32_t primask = __get_PRIMASK(); |
Christopher Haster |
0:b1b901ae3696 | 113 | __disable_irq(); |
Christopher Haster |
6:31c141d3bcc7 | 114 | |
Christopher Haster |
14:5abf2ccf2dbf | 115 | e = _free; |
Christopher Haster |
14:5abf2ccf2dbf | 116 | if (e) { |
Christopher Haster |
14:5abf2ccf2dbf | 117 | _free = e->next; |
Christopher Haster |
0:b1b901ae3696 | 118 | } |
Christopher Haster |
6:31c141d3bcc7 | 119 | |
Christopher Haster |
7:dcd589b578ca | 120 | __set_PRIMASK(primask); |
Christopher Haster |
14:5abf2ccf2dbf | 121 | |
Christopher Haster |
14:5abf2ccf2dbf | 122 | return e; |
Christopher Haster |
0:b1b901ae3696 | 123 | } |
Christopher Haster |
0:b1b901ae3696 | 124 | |
Christopher Haster |
14:5abf2ccf2dbf | 125 | void EventQueue::dealloc(struct event *e) { |
Christopher Haster |
14:5abf2ccf2dbf | 126 | uint32_t primask = __get_PRIMASK(); |
Christopher Haster |
14:5abf2ccf2dbf | 127 | __disable_irq(); |
Christopher Haster |
14:5abf2ccf2dbf | 128 | |
Christopher Haster |
14:5abf2ccf2dbf | 129 | e->next = _free; |
Christopher Haster |
14:5abf2ccf2dbf | 130 | _free = e; |
Christopher Haster |
14:5abf2ccf2dbf | 131 | |
Christopher Haster |
14:5abf2ccf2dbf | 132 | __set_PRIMASK(primask); |
Christopher Haster |
14:5abf2ccf2dbf | 133 | |
Christopher Haster |
14:5abf2ccf2dbf | 134 | } |
Christopher Haster |
14:5abf2ccf2dbf | 135 |