arm_uc_scheduler.c
// ----------------------------------------------------------------------------
// Copyright 2016-2017 ARM Ltd.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------

#include "update-client-common/arm_uc_config.h"
#include "update-client-common/arm_uc_scheduler.h"
#include "update-client-common/arm_uc_trace.h"
#include "update-client-common/arm_uc_error.h"

#include "atomic-queue/atomic-queue.h"

#include <string.h> /* memset */

static struct atomic_queue arm_uc_queue = { 0 };
static void (*arm_uc_notificationHandler)(void) = NULL;
static volatile uintptr_t callbacks_pending = 0;

int32_t ARM_UC_SchedulerGetQueuedCount(void)
{
    return aq_count(&arm_uc_queue);
}

#if ARM_UC_SCHEDULER_STORAGE_POOL_SIZE
/* Define the scheduler's callback pool storage.
 * The scheduler will allocate out of this pool whenever it encounters a
 * callback that is already locked or a callback that is NULL.
 */
static arm_uc_callback_t callback_pool_storage[ARM_UC_SCHEDULER_STORAGE_POOL_SIZE];
static arm_uc_callback_t *callback_pool_root;
#endif

static void (*scheduler_error_cb)(uint32_t parameter);
static arm_uc_callback_t callback_pool_exhausted_error_callback = {0};
static arm_uc_callback_t callback_failed_take_error_callback = {0};

/* Single element used for queuing errors */
static arm_uc_callback_t plugin_error_callback = {0};
static volatile uintptr_t plugin_error_pending = 0;

#define POOL_WATERMARK 0xABABABAB

void ARM_UC_SchedulerInit(void)
{
#if ARM_UC_SCHEDULER_STORAGE_POOL_SIZE
    /* Initialize the storage pool */
    callback_pool_root = callback_pool_storage;
    for (size_t i = 0; i < ARM_UC_SCHEDULER_STORAGE_POOL_SIZE - 1; i++) {
        callback_pool_storage[i].next = &callback_pool_storage[i + 1];
        /* Watermark pool elements by setting the lock to POOL_WATERMARK.
         * This allows checking of the maximum number of concurrent allocations.
         */
        callback_pool_storage[i].lock = POOL_WATERMARK;
    }
    callback_pool_storage[ARM_UC_SCHEDULER_STORAGE_POOL_SIZE - 1].next = NULL;
    callback_pool_storage[ARM_UC_SCHEDULER_STORAGE_POOL_SIZE - 1].lock = POOL_WATERMARK;
#endif
    memset(&callback_pool_exhausted_error_callback, 0, sizeof(arm_uc_callback_t));
    memset(&callback_failed_take_error_callback, 0, sizeof(arm_uc_callback_t));
    memset(&plugin_error_callback, 0, sizeof(arm_uc_callback_t));
    callbacks_pending = 0;
    plugin_error_pending = 0;
}
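/**
 * Example (illustrative; not part of the original file): a typical
 * application initializes the scheduler once at boot and registers a
 * notification handler that wakes its event loop, plus an error handler
 * for pool-exhaustion conditions. `wake_event_loop` and
 * `handle_scheduler_error` are hypothetical application functions.
 *
 * @code
 *     static void wake_event_loop(void)
 *     {
 *         // e.g. signal a semaphore or post an event to the main thread
 *     }
 *
 *     static void handle_scheduler_error(uint32_t parameter)
 *     {
 *         // parameter is one of the ARM_UC_EQ_ERR_* codes posted below
 *     }
 *
 *     void app_init(void)
 *     {
 *         ARM_UC_SchedulerInit();
 *         ARM_UC_AddNotificationHandler(wake_event_loop);
 *         ARM_UC_SetSchedulerErrorHandler(handle_scheduler_error);
 *     }
 * @endcode
 */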
/**
 * @brief Allocate a block from the pool
 * @details Gets a non-NULL block from the callback pool.
 *
 * Theory of operation:
 * * callback_pool_alloc starts by fetching the current value of the pool's
 *   root. This value should be the next free item in the pool.
 * * If the value is NULL, then there are no elements left in the pool, so
 *   callback_pool_alloc returns NULL.
 * * callback_pool_alloc tries to take this element by replacing the root
 *   node with the following element. If replacement fails, callback_pool_alloc
 *   tries the whole process again. This is repeated until allocation succeeds
 *   or the root pointer is NULL.
 *
 * @retval NULL     No element was available to allocate
 * @retval non-NULL An allocated element
 */
static arm_uc_callback_t *callback_pool_alloc()
{
    while (true) {
        arm_uc_callback_t *prev_free = callback_pool_root;
        if (NULL == prev_free) {
            return NULL;
        }
        arm_uc_callback_t *new_free = prev_free->next;

        if (aq_atomic_cas_uintptr((uintptr_t *)&callback_pool_root, (uintptr_t)prev_free, (uintptr_t)new_free)) {
            return prev_free;
        }
    }
}

/**
 * @brief Check if the pool owns a block
 * @details callback_pool_owns() checks whether the pointer supplied exists
 * within the callback_pool_storage array. If it does, that means that the pool
 * should own the block.
 *
 * @param[in] e the element to evaluate for pool ownership
 *
 * @retval 1 the pool owns the callback
 * @retval 0 the pool does not own the callback
 */
static int callback_pool_owns(arm_uc_callback_t *e)
{
    int isGreater = e >= callback_pool_storage;
    int isLesser = (uintptr_t)e < ((uintptr_t)callback_pool_storage + sizeof(callback_pool_storage));
    return isGreater && isLesser;
}

/**
 * @brief Free a block owned by the pool.
 * @details Checks whether the supplied callback is owned by the pool and frees
 * it if so. Performs no operation for a callback that is not owned by the pool.
 *
 * @param[in] e the element to free
 */
static void callback_pool_free(arm_uc_callback_t *e)
{
    UC_SDLR_TRACE("%s (%p)", __PRETTY_FUNCTION__, e);
    if (callback_pool_owns(e)) {
        while (true) {
            arm_uc_callback_t *prev_free = callback_pool_root;

            e->next = prev_free;
            UC_SDLR_TRACE("%s inserting r:%p p:%p, e:%p, ", __PRETTY_FUNCTION__, callback_pool_root, prev_free, e);
            if (aq_atomic_cas_uintptr((uintptr_t *)&callback_pool_root, (uintptr_t)prev_free, (uintptr_t)e)) {
                break;
            }
            UC_SDLR_TRACE("%s inserting failed", __PRETTY_FUNCTION__);
        }
    }
}

uint32_t ARM_UC_SchedulerGetHighWatermark(void)
{
    uint32_t i;
    for (i = 0; i < ARM_UC_SCHEDULER_STORAGE_POOL_SIZE; i++) {
        if (callback_pool_storage[i].lock == POOL_WATERMARK) {
            break;
        }
    }
    return i;
}
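/**
 * Example (illustrative; not part of the original file): because unused pool
 * elements keep POOL_WATERMARK in their lock field, the high watermark
 * reports the maximum number of pool callbacks ever allocated concurrently,
 * which can guide the sizing of ARM_UC_SCHEDULER_STORAGE_POOL_SIZE:
 *
 * @code
 *     uint32_t used = ARM_UC_SchedulerGetHighWatermark();
 *     if (used == ARM_UC_SCHEDULER_STORAGE_POOL_SIZE) {
 *         // the fallback pool has been fully used at least once;
 *         // consider enlarging it
 *     }
 * @endcode
 */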
void ARM_UC_AddNotificationHandler(void (*handler)(void))
{
    arm_uc_notificationHandler = handler;
}

void ARM_UC_SetSchedulerErrorHandler(void (*handler)(uint32_t))
{
    scheduler_error_cb = handler;
}

bool ARM_UC_PostCallbackCtx(arm_uc_callback_t *_storage,
                            void *_ctx,
                            arm_uc_context_callback_t _callback,
                            uintptr_t _parameter)
{
    bool success = true;
    UC_SDLR_TRACE("%s Scheduling %p(%lu) with %p (context %p)", __PRETTY_FUNCTION__, _callback, _parameter, _storage, _ctx);

    if (_callback == NULL || _ctx == NULL) {
        return false;
    }

    if (_storage) {
        int result = aq_element_take((void *) _storage, _ctx);
        if (result != ATOMIC_QUEUE_SUCCESS) {

            // NOTE: This may be useful for detecting double-allocation of callbacks on mbed-os too
#if defined(TARGET_IS_PC_LINUX)
            /* On Linux, issue an error message if the callback was not added
               to the queue. This is dangerous in mbed-os, since writing to the
               console from an interrupt context might crash the program. */
            UC_SDLR_TRACE("ARM_UC_PostCallback failed to acquire lock on: %p %p; allocating a temporary callback",
                          _storage,
                          _callback);

#endif
            _storage = NULL;
        }
    }
    if (_storage == NULL) {
        _storage = callback_pool_alloc();
        if (_storage == NULL) {
            success = false;
            /* Handle a failed alloc */

#ifdef TARGET_IS_PC_LINUX
            /* On Linux, issue an error message if the callback was not added
               to the queue. This is dangerous in mbed-os, since writing to the
               console from an interrupt context might crash the program. */
            UC_SDLR_ERR_MSG("Failed to allocate a callback block");
#endif
            if (scheduler_error_cb) {
                _storage = &callback_pool_exhausted_error_callback;
                int result = aq_element_take((void *) _storage, ATOMIC_QUEUE_NO_CONTEXT);
                if (result == ATOMIC_QUEUE_SUCCESS) {
                    _parameter = ARM_UC_EQ_ERR_POOL_EXHAUSTED;
                    _callback = (arm_uc_context_callback_t)scheduler_error_cb;
                } else {
                    _storage = NULL;
                }
            }
        } else {
            /* This thread is guaranteed to exclusively own _storage here */
            aq_initialize_element((void *) _storage);
            int result = aq_element_take((void *) _storage, _ctx);
            if (result != ATOMIC_QUEUE_SUCCESS) {
                success = false;
                /* This should be impossible */
                UC_SDLR_ERR_MSG("Failed to take an allocated callback block... this should be impossible...");
                if (scheduler_error_cb) {
                    _storage = &callback_failed_take_error_callback;
                    int result = aq_element_take((void *) _storage, ATOMIC_QUEUE_NO_CONTEXT);
                    if (result == ATOMIC_QUEUE_SUCCESS) {
                        _parameter = ARM_UC_EQ_ERR_FAILED_TAKE;
                        _callback = (arm_uc_context_callback_t)scheduler_error_cb;
                    } else {
                        _storage = NULL;
                    }
                }
            }
        }
    }
    if (_storage) {
        /* populate callback struct */
        _storage->callback = (void *)_callback;
        _storage->parameter = _parameter;

        UC_SDLR_TRACE("%s Queueing %p(%lu) in %p", __PRETTY_FUNCTION__, _callback, _parameter, _storage);

        /* push struct to atomic queue */
        int result = aq_push_tail(&arm_uc_queue, (void *) _storage);

        if (result == ATOMIC_QUEUE_SUCCESS) {
            UC_SDLR_TRACE("%s Scheduling success!", __PRETTY_FUNCTION__);

            /* If a notification handler is set, check whether this is the
             * first insertion:
             * try to set callbacks_pending to 1. This fails if it is already
             * 1 (other callbacks are pending). If successful, notify.
             */
            if (arm_uc_notificationHandler) {
                while (callbacks_pending == 0 && arm_uc_queue.tail != NULL) {
                    /* The cast drops the volatile qualifier from
                     * callbacks_pending for the CAS call */
                    int cas_result = aq_atomic_cas_uintptr((uintptr_t *)&callbacks_pending, 0, 1);
                    if (cas_result) {
                        UC_SDLR_TRACE("%s Invoking notify!", __PRETTY_FUNCTION__);

                        /* disable: UC_SDLR_TRACE("notify nanostack scheduler"); */
                        arm_uc_notificationHandler();
                    }
                }
            }
        } else {
            success = false;
        }
    }

    return success;
}
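/**
 * Example (illustrative; not part of the original file): posting a callback
 * with a context pointer. The storage element is typically static, so the
 * fallback pool is used only when the same element is still locked in the
 * queue. `my_ctx_handler` and `my_state` are hypothetical, and the handler
 * signature assumes arm_uc_context_callback_t is void (*)(void *, uintptr_t),
 * as the dispatch code in ARM_UC_ProcessQueue below suggests.
 *
 * @code
 *     static void my_ctx_handler(void *ctx, uintptr_t event)
 *     {
 *         // runs later, when the queue is processed
 *     }
 *
 *     static arm_uc_callback_t my_storage = { 0 };
 *     static int my_state;
 *
 *     void on_event(void)
 *     {
 *         (void) ARM_UC_PostCallbackCtx(&my_storage, &my_state, my_ctx_handler, 42);
 *     }
 * @endcode
 */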
bool ARM_UC_PostCallback(arm_uc_callback_t *_storage,
                         arm_uc_no_context_callback_t _callback,
                         uintptr_t _parameter)
{
    return ARM_UC_PostCallbackCtx(_storage, ATOMIC_QUEUE_NO_CONTEXT, (arm_uc_context_callback_t)_callback, _parameter);
}

bool ARM_UC_PostErrorCallbackCtx(void *_ctx, arm_uc_context_callback_t _callback, uintptr_t _parameter)
{
    UC_SDLR_TRACE("%s Scheduling error callback %p with parameter %lu and context %p", __PRETTY_FUNCTION__, _callback, _parameter, _ctx);

    if (_callback == NULL || _ctx == NULL) {
        return false;
    }

    /* Take ownership of error callback */
    int result = aq_element_take((void *)&plugin_error_callback, _ctx);
    if (result != ATOMIC_QUEUE_SUCCESS) {
        UC_SDLR_ERR_MSG("ARM_UC_PostErrorCallback failed to acquire lock on error callback");
        return false;
    }

    /* populate callback struct */
    plugin_error_callback.callback = (void *)_callback;
    plugin_error_callback.parameter = _parameter;

    plugin_error_pending = 1;
    return true;
}
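/**
 * Example (illustrative; not part of the original file): a plugin can report
 * a fatal condition through the dedicated single-element error slot, which
 * both ARM_UC_ProcessQueue and ARM_UC_ProcessSingleCallback dispatch ahead
 * of the regular queue. `my_error_handler` is hypothetical, with the same
 * assumed context-callback signature as above.
 *
 * @code
 *     static void my_error_handler(void *ctx, uintptr_t error)
 *     {
 *         // handle the error outside interrupt context
 *     }
 *
 *     void report_error(void *plugin_ctx, uintptr_t error_code)
 *     {
 *         if (!ARM_UC_PostErrorCallbackCtx(plugin_ctx, my_error_handler, error_code)) {
 *             // a previously posted error callback is still pending
 *         }
 *     }
 * @endcode
 */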
/**
 * @brief Clear the callbacks_pending flag.
 * @details This function attempts to clear the callbacks_pending flag. This
 * operation can fail if:
 * * the flag has already been cleared
 * * the operation is interrupted
 * * the queue is not empty
 *
 * The return from this function indicates whether or not the scheduler should
 * continue processing callbacks. This is used to prevent duplicate
 * notifications. This could be a simple flag, but that would introduce several
 * race conditions. By using atomic Compare And Swap, we are able to detect and
 * correct those race conditions.
 *
 * Operation:
 * Case 1
 * If the callbacks_pending flag is clear AND the queue is empty, there is
 * nothing to do and the scheduler should stop processing callbacks.
 *
 * Case 2
 * If the callbacks_pending flag is set AND the queue is not empty, there is
 * nothing to do and the scheduler should continue processing callbacks.
 *
 * Case 3
 * If the callbacks_pending flag is clear AND the queue is not empty, then the
 * callbacks_pending flag must be set to 1. If this operation is successful,
 * then the scheduler should continue processing callbacks. If the CAS fails,
 * then the scheduler must perform all checks and try again.
 *
 * Case 4
 * If the callbacks_pending flag is set AND the queue is empty, then the
 * callbacks_pending flag must be cleared. Atomic CAS opens an atomic context,
 * checks that the callbacks_pending flag is still set, then sets it to 0.
 * Atomic CAS will fail if either callbacks_pending is 0 OR if the CAS is
 * interrupted by another atomic operation. If the CAS succeeds and the flag is
 * cleared, then the scheduler must check whether the queue is empty, since a
 * new post could have happened after callbacks_pending was stored to
 * cbp_local. If the CAS fails, then the scheduler must perform all checks and
 * try again.
 *
 * @return false if the scheduler should stop processing callbacks or true if
 * the scheduler should continue processing callbacks.
 */
static bool try_clear_callbacks_pending()
{
    bool run_again = true;
    bool cleared_flag = false;
    while (true) {
        /* Preserve local copies of callbacks_pending and queue_empty */
        uintptr_t cbp_local = callbacks_pending;
        bool queue_empty = arm_uc_queue.tail == NULL;
        /* Case 1 */
        /* Flag clear, no elements queued. Nothing to do */
        if (!cbp_local && queue_empty) {
            run_again = false;
            break;
        }
        /* Case 2 */
        /* Flag is set and elements are queued. Nothing to do */
        if (cbp_local && !queue_empty) {
            /* Do not indicate a "run again" condition if the flag was
             * previously cleared
             */
            run_again = !cleared_flag;
            break;
        }
        /* Case 3 */
        /* Flag not set, elements queued. Set flag. */
        if (!cbp_local && !queue_empty) {
            int cas_result = aq_atomic_cas_uintptr((uintptr_t *)&callbacks_pending, cbp_local, 1);
            /* on success, exit and continue scheduling */
            if (cas_result) {
                run_again = true;
                break;
            }
        }
        /* Case 4 */
        /* Flag set, no elements queued. Clear flag */
        if (cbp_local && queue_empty) {
            int cas_result = aq_atomic_cas_uintptr((uintptr_t *)&callbacks_pending, cbp_local, 0);
            if (cas_result) {
                /* If the flag returns to true, then Case 2 should not set
                 * run_again, since this would cause a duplicate notification.
                 */
                cleared_flag = true;
            }
            /* If the result is success, then the scheduler must check for
             * (!cbp_local && !queue_empty). If the result is failure, then the
             * scheduler must try again.
             */
        }
    }
    return run_again;
}
void ARM_UC_ProcessQueue(void)
{
    arm_uc_callback_t *element = NULL;

    while (true) {
        element = NULL;
        /* Always consider the error callback first */
        if (plugin_error_pending) {
            /* Clear the read lock */
            plugin_error_pending = 0;
            element = &plugin_error_callback;
        }
        /* If the error callback isn't taken, get an element from the queue */
        else if (callbacks_pending) {
            element = (arm_uc_callback_t *) aq_pop_head(&arm_uc_queue);
        }
        /* If the queue is empty */
        if (element == NULL) {
            /* Try to shut down queue processing */
            if (!try_clear_callbacks_pending()) {
                break;
            }
            /* More work became available; loop and pop again rather than
             * dereferencing the NULL element. */
            continue;
        }

        UC_SDLR_TRACE("%s Invoking %p(%lu)", __PRETTY_FUNCTION__, element->callback, element->parameter);
        /* Store the callback locally */
        void *callback = element->callback;
        /* Store the parameter locally */
        uintptr_t parameter = element->parameter;

        /* Release the lock on the element */
        UC_SDLR_TRACE("%s Releasing %p", __PRETTY_FUNCTION__, element);
        void *ctx;
        aq_element_release((void *) element, &ctx);
        /* Free the element if it was pool allocated */
        UC_SDLR_TRACE("%s Freeing %p", __PRETTY_FUNCTION__, element);
        callback_pool_free((void *) element);

        /* execute callback */
        if (ctx == ATOMIC_QUEUE_NO_CONTEXT) {
            ((arm_uc_no_context_callback_t)callback)(parameter);
        } else {
            ((arm_uc_context_callback_t)callback)(ctx, parameter);
        }
    }
}

bool ARM_UC_ProcessSingleCallback(void)
{
    bool call_again = true;
    arm_uc_callback_t *element = NULL;
    /* Always consider the error callback first */
    if (plugin_error_pending) {
        /* Clear the read lock */
        plugin_error_pending = 0;
        element = &plugin_error_callback;
    }
    /* If the error callback isn't taken, get an element from the queue */
    else {
        element = (arm_uc_callback_t *) aq_pop_head(&arm_uc_queue);
        /* If the queue is empty */
        if (element == NULL) {
            /* Try to shut down queue processing */
            call_again = try_clear_callbacks_pending();
        }
    }

    if (element != NULL) {
        UC_SDLR_TRACE("%s Invoking %p(%lu)", __PRETTY_FUNCTION__, element->callback, element->parameter);
        /* Store the callback locally */
        void *callback = element->callback;
        /* Store the parameter locally */
        uintptr_t parameter = element->parameter;
        /* Release the lock on the element */
        UC_SDLR_TRACE("%s Releasing %p", __PRETTY_FUNCTION__, element);
        void *ctx;
        aq_element_release((void *) element, &ctx);
        /* Free the element if it was pool allocated */
        UC_SDLR_TRACE("%s Freeing %p", __PRETTY_FUNCTION__, element);
        callback_pool_free((void *) element);

        /* execute callback */
        if (ctx == ATOMIC_QUEUE_NO_CONTEXT) {
            ((arm_uc_no_context_callback_t)callback)(parameter);
        } else {
            ((arm_uc_context_callback_t)callback)(ctx, parameter);
        }

        /* Try to shut down queue processing */
        call_again = try_clear_callbacks_pending();
    }

    return call_again || plugin_error_pending;
}
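/**
 * Example (illustrative; not part of the original file): the queue is drained
 * from thread context, never from the interrupt that posted the callback. An
 * event loop can either drain everything at once or interleave single
 * callbacks with other work. `wait_until`, `work_available`, and
 * `do_other_work` are hypothetical; `work_available` would be set by the
 * registered notification handler.
 *
 * @code
 *     void event_loop(void)
 *     {
 *         for (;;) {
 *             wait_until(work_available);  // hypothetical blocking wait
 *             ARM_UC_ProcessQueue();       // drain all pending callbacks
 *             // or, to interleave with other work:
 *             // while (ARM_UC_ProcessSingleCallback()) { do_other_work(); }
 *         }
 *     }
 * @endcode
 */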