Committer: kevman
Date:      Wed Mar 13 11:03:24 2019 +0000
Revision:  2:7aab896b1a3b
Parent:    0:38ceb79fef03
Message:   2019-03-13

/*
 * Copyright (c) 2014-2015 ARM Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <string.h>
#include "ns_types.h"
#include "ns_list.h"
#include "eventOS_event.h"
#include "eventOS_scheduler.h"
#include "timer_sys.h"
#include "nsdynmemLIB.h"
#include "ns_timer.h"
#include "event.h"
#include "platform/arm_hal_interrupt.h"


typedef struct arm_core_tasklet {
    int8_t id; /**< Event handler tasklet ID */
    void (*func_ptr)(arm_event_s *);
    ns_list_link_t link;
} arm_core_tasklet_t;

static NS_LIST_DEFINE(arm_core_tasklet_list, arm_core_tasklet_t, link);
static NS_LIST_DEFINE(event_queue_active, arm_event_storage_t, link);
static NS_LIST_DEFINE(free_event_entry, arm_event_storage_t, link);

// Statically allocate the initial pool of events.
#define STARTUP_EVENT_POOL_SIZE 10
static arm_event_storage_t startup_event_pool[STARTUP_EVENT_POOL_SIZE];

/** curr_tasklet tells the core and the platform which tasklet is active; the core updates it automatically when switching tasklets. */
int8_t curr_tasklet = 0;


static arm_core_tasklet_t *tasklet_dynamically_allocate(void);
static arm_event_storage_t *event_dynamically_allocate(void);
static arm_event_storage_t *event_core_get(void);
static void event_core_write(arm_event_storage_t *event);

static arm_core_tasklet_t *event_tasklet_handler_get(uint8_t tasklet_id)
{
    ns_list_foreach(arm_core_tasklet_t, cur, &arm_core_tasklet_list) {
        if (cur->id == tasklet_id) {
            return cur;
        }
    }
    return NULL;
}

bool event_tasklet_handler_id_valid(uint8_t tasklet_id)
{
    return event_tasklet_handler_get(tasklet_id) != NULL;
}

// XXX this can return 0, but 0 seems to mean "none" elsewhere? Or at least
// curr_tasklet is reset to 0 in various places.
static int8_t tasklet_get_free_id(void)
{
    /* (Note use of uint8_t to avoid overflow if we reach 0x7F) */
    for (uint8_t i = 0; i <= INT8_MAX; i++) {
        if (!event_tasklet_handler_get(i)) {
            return i;
        }
    }
    return -1;
}


int8_t eventOS_event_handler_create(void (*handler_func_ptr)(arm_event_s *), uint8_t init_event_type)
{
    arm_event_storage_t *event_tmp;

    // XXX Do we really want to prevent multiple tasklets with the same function?
    ns_list_foreach(arm_core_tasklet_t, cur, &arm_core_tasklet_list) {
        if (cur->func_ptr == handler_func_ptr) {
            return -1;
        }
    }

    // Allocate a new tasklet
    arm_core_tasklet_t *new = tasklet_dynamically_allocate();
    if (!new) {
        return -2;
    }

    event_tmp = event_core_get();
    if (!event_tmp) {
        ns_dyn_mem_free(new);
        return -2;
    }

    // Fill in the tasklet and add it to the list
    new->id = tasklet_get_free_id();
    new->func_ptr = handler_func_ptr;
    ns_list_add_to_end(&arm_core_tasklet_list, new);

    // Queue the "init" event for the new tasklet
    event_tmp->data.receiver = new->id;
    event_tmp->data.sender = 0;
    event_tmp->data.event_type = init_event_type;
    event_tmp->data.event_data = 0;
    event_core_write(event_tmp);

    return new->id;
}
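
/*
 * Usage sketch (illustrative, not taken from this module): registering a
 * tasklet and handling its init event. my_tasklet, my_tasklet_id and
 * MY_INIT_EVENT are assumed application-side names.
 *
 *     static void my_tasklet(arm_event_s *event)
 *     {
 *         if (event->event_type == MY_INIT_EVENT) {
 *             // one-time setup, runs when the queued init event is dispatched
 *         }
 *     }
 *
 *     int8_t my_tasklet_id = eventOS_event_handler_create(&my_tasklet, MY_INIT_EVENT);
 *     // Returns the new tasklet ID, or -1 if a tasklet with the same handler
 *     // already exists, or -2 if allocation fails.
 */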

int8_t eventOS_event_send(const arm_event_t *event)
{
    if (event_tasklet_handler_get(event->receiver)) {
        arm_event_storage_t *event_tmp = event_core_get();
        if (event_tmp) {
            event_tmp->data = *event;
            event_core_write(event_tmp);
            return 0;
        }
    }
    return -1;
}
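
/*
 * Usage sketch (illustrative): sending an event to a previously created
 * tasklet. my_tasklet_id and MY_EVENT_TYPE are assumed application-side
 * names.
 *
 *     arm_event_t ev = {
 *         .receiver   = my_tasklet_id,
 *         .sender     = 0,
 *         .event_type = MY_EVENT_TYPE,
 *         .event_id   = 0,
 *         .event_data = 42,
 *         .data_ptr   = NULL,
 *         .priority   = ARM_LIB_LOW_PRIORITY_EVENT,
 *     };
 *     if (eventOS_event_send(&ev) < 0) {
 *         // Receiver does not exist, or no event storage could be allocated.
 *     }
 */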

void eventOS_event_send_user_allocated(arm_event_storage_t *event)
{
    event->allocator = ARM_LIB_EVENT_USER;
    event_core_write(event);
}
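
/*
 * Usage sketch (illustrative): the caller owns the storage, so it must stay
 * valid until the event has been dispatched; event_core_free_push() leaves
 * USER-allocated storage with the caller. my_event_storage, my_tasklet_id
 * and MY_EVENT_TYPE are assumed application-side names.
 *
 *     static arm_event_storage_t my_event_storage; // caller-owned
 *
 *     my_event_storage.data.receiver   = my_tasklet_id;
 *     my_event_storage.data.sender     = 0;
 *     my_event_storage.data.event_type = MY_EVENT_TYPE;
 *     my_event_storage.data.event_data = 0;
 *     my_event_storage.data.data_ptr   = NULL;
 *     my_event_storage.data.priority   = ARM_LIB_LOW_PRIORITY_EVENT;
 *     eventOS_event_send_user_allocated(&my_event_storage);
 */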

void eventOS_event_send_timer_allocated(arm_event_storage_t *event)
{
    event->allocator = ARM_LIB_EVENT_TIMER;
    event_core_write(event);
}

void eventOS_event_cancel_critical(arm_event_storage_t *event)
{
    ns_list_remove(&event_queue_active, event);
}

static arm_event_storage_t *event_dynamically_allocate(void)
{
    arm_event_storage_t *event = ns_dyn_mem_temporary_alloc(sizeof(arm_event_storage_t));
    if (event) {
        event->allocator = ARM_LIB_EVENT_DYNAMIC;
    }
    return event;
}

static arm_core_tasklet_t *tasklet_dynamically_allocate(void)
{
    return ns_dyn_mem_alloc(sizeof(arm_core_tasklet_t));
}

arm_event_storage_t *event_core_get(void)
{
    arm_event_storage_t *event;
    platform_enter_critical();
    event = ns_list_get_first(&free_event_entry);
    if (event) {
        ns_list_remove(&free_event_entry, event);
    } else {
        event = event_dynamically_allocate();
    }
    if (event) {
        event->data.data_ptr = NULL;
        event->data.priority = ARM_LIB_LOW_PRIORITY_EVENT;
    }
    platform_exit_critical();
    return event;
}

void event_core_free_push(arm_event_storage_t *free)
{
    free->state = ARM_LIB_EVENT_UNQUEUED;

    switch (free->allocator) {
        case ARM_LIB_EVENT_STARTUP_POOL:
            platform_enter_critical();
            ns_list_add_to_start(&free_event_entry, free);
            platform_exit_critical();
            break;
        case ARM_LIB_EVENT_DYNAMIC:
            // Return dynamically allocated events to the heap.
            ns_dyn_mem_free(free);
            break;
        case ARM_LIB_EVENT_TIMER:
            // Hand it back to the timer system.
            timer_sys_event_free(free);
            break;
        case ARM_LIB_EVENT_USER:
        default:
            break;
    }
}


static arm_event_storage_t *event_core_read(void)
{
    platform_enter_critical();
    arm_event_storage_t *event = ns_list_get_first(&event_queue_active);
    if (event) {
        event->state = ARM_LIB_EVENT_RUNNING;
        ns_list_remove(&event_queue_active, event);
    }
    platform_exit_critical();
    return event;
}

void event_core_write(arm_event_storage_t *event)
{
    platform_enter_critical();
    bool added = false;
    ns_list_foreach(arm_event_storage_t, event_tmp, &event_queue_active) {
        // Note: enum ordering means we're checking if event_tmp is LOWER priority than event
        if (event_tmp->data.priority > event->data.priority) {
            ns_list_add_before(&event_queue_active, event_tmp, event);
            added = true;
            break;
        }
    }
    if (!added) {
        ns_list_add_to_end(&event_queue_active, event);
    }
    event->state = ARM_LIB_EVENT_QUEUED;

    /* Wake from idle */
    platform_exit_critical();
    eventOS_scheduler_signal();
}
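
/*
 * Worked example (illustrative): with the priority enum ordered so that a
 * numerically smaller value means higher priority, a newly written event is
 * inserted before the first queued event whose priority value is strictly
 * larger, and appended to the tail otherwise. If the queue holds priority
 * values {0, 0, 2} and an event with value 1 is written, the queue becomes
 * {0, 0, 1, 2}; equal-priority events keep FIFO order.
 */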

// Requires lock to be held
arm_event_storage_t *eventOS_event_find_by_id_critical(uint8_t tasklet_id, uint8_t event_id)
{
    ns_list_foreach(arm_event_storage_t, cur, &event_queue_active) {
        if (cur->data.receiver == tasklet_id && cur->data.event_id == event_id) {
            return cur;
        }
    }

    return NULL;
}

/**
 * \brief Initialize the Nanostack core.
 *
 * Initializes the Nanostack core, socket interface and buffer memory, and
 * sends the init event to all defined tasklets.
 */
void eventOS_scheduler_init(void)
{
    /* Reset event list variables */
    ns_list_init(&free_event_entry);
    ns_list_init(&event_queue_active);
    ns_list_init(&arm_core_tasklet_list);

    // Add the statically allocated startup pool entries to the "free" list
    for (unsigned i = 0; i < (sizeof(startup_event_pool) / sizeof(startup_event_pool[0])); i++) {
        startup_event_pool[i].allocator = ARM_LIB_EVENT_STARTUP_POOL;
        ns_list_add_to_start(&free_event_entry, &startup_event_pool[i]);
    }

    /* Init generic timer module */
    timer_sys_init();
    /* Set tasklet switcher to idle */
    curr_tasklet = 0;

}

int8_t eventOS_scheduler_get_active_tasklet(void)
{
    return curr_tasklet;
}

void eventOS_scheduler_set_active_tasklet(int8_t tasklet)
{
    curr_tasklet = tasklet;
}

int eventOS_scheduler_timer_stop(void)
{
    timer_sys_disable();
    if (ns_timer_sleep() != 0) {
        return 1;
    }
    return 0;
}

int eventOS_scheduler_timer_synch_after_sleep(uint32_t sleep_ticks)
{
    // Convert milliseconds to 10 ms system ticks, then add one tick
    sleep_ticks /= 10;
    sleep_ticks++;
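    // e.g. a reported sleep of 25 ms becomes 25 / 10 = 2, plus 1 = 3 ticks
    // passed to system_timer_tick_update() below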
    system_timer_tick_update(sleep_ticks);
    if (timer_sys_wakeup() == 0) {
        return 0;
    }
    return -1;
}

/**
 * \brief Dispatch a single event.
 *
 * Reads one event from the core event queue and calls the handler of the
 * tasklet that is its receiver. Returns false when the event queue is empty.
 */
bool eventOS_scheduler_dispatch_event(void)
{
    curr_tasklet = 0;

    arm_event_storage_t *cur_event = event_core_read();
    if (!cur_event) {
        return false;
    }

    curr_tasklet = cur_event->data.receiver;

    arm_core_tasklet_t *tasklet = event_tasklet_handler_get(curr_tasklet);
    /* Do not bother with check for NULL - tasklets cannot be deleted,
     * and user-facing API eventOS_event_send() has already checked the tasklet
     * exists, so there is no possible issue there.
     *
     * For eventOS_event_send_user_allocated(), it would be a non-recoverable
     * error to not deliver the message - we have to have a receiver to pass
     * ownership to. If the lookup fails, let it crash. We want the send call
     * itself to return void to simplify logic.
     */

    /* Tasklet scheduler call */
    tasklet->func_ptr(&cur_event->data);
    event_core_free_push(cur_event);

    /* Set current tasklet back to idle state */
    curr_tasklet = 0;

    return true;
}

void eventOS_scheduler_run_until_idle(void)
{
    while (eventOS_scheduler_dispatch_event());
}

/**
 * \brief Infinite event dispatch loop.
 *
 * Reads and handles core events, switching to the tasklet that is each
 * event's receiver. When the event queue is empty, the scheduler goes to
 * sleep (idle).
 */
NS_NORETURN void eventOS_scheduler_run(void)
{
    while (1) {
        if (!eventOS_scheduler_dispatch_event()) {
            eventOS_scheduler_idle();
        }
    }
}
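
/*
 * Usage sketch (illustrative): a typical bring-up sequence. my_tasklet and
 * MY_INIT_EVENT are assumed application-side names.
 *
 *     eventOS_scheduler_init();
 *     int8_t id = eventOS_event_handler_create(&my_tasklet, MY_INIT_EVENT);
 *     eventOS_scheduler_run();            // never returns; idles when the queue is empty
 *     // ...or, on a platform driving its own main loop:
 *     eventOS_scheduler_run_until_idle(); // returns once the queue is drained
 */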

void eventOS_cancel(arm_event_storage_t *event)
{
    if (!event) {
        return;
    }

    platform_enter_critical();

    /*
     * Notify the timer of cancellation.
     */
    if (event->allocator == ARM_LIB_EVENT_TIMER) {
        timer_sys_event_cancel_critical(event);
    }

    /*
     * Remove the event from the list.
     * Only QUEUED events can be removed; UNQUEUED ones are either timers or
     * stale pointers. RUNNING cannot be removed - we are currently "in" that
     * event.
     */
    if (event->state == ARM_LIB_EVENT_QUEUED) {
        eventOS_event_cancel_critical(event);
    }

    /*
     * Push the event back to the "free" state.
     */
    if (event->state != ARM_LIB_EVENT_RUNNING) {
        event_core_free_push(event);
    }

    platform_exit_critical();
}
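
/*
 * Usage sketch (illustrative): cancelling a pending user-allocated event
 * before it is dispatched. my_event_storage is assumed to be the same
 * caller-owned storage passed to eventOS_event_send_user_allocated().
 *
 *     eventOS_event_send_user_allocated(&my_event_storage);
 *     // ...later, before the scheduler has dispatched it:
 *     eventOS_cancel(&my_event_storage);
 *     // The storage returns to the UNQUEUED state; for USER-allocated events
 *     // event_core_free_push() leaves ownership with the caller.
 */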