Version 0.2. Sending over the NRF does not fully work yet, because data has to be transmitted in 4-byte chunks. This will be fixed by handling the payload size dynamically.
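A minimal sketch of the 4-byte workaround described above, assuming the radio driver currently only accepts fixed 4-byte payloads. The nrf_send() function pointer is a hypothetical placeholder for whatever write routine the NRF driver exposes; it is not part of this library.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define NRF_CHUNK_SIZE 4  /* radio currently only accepts fixed 4-byte payloads */

/* Split an arbitrary-length buffer into 4-byte chunks, zero-padding the last one. */
static void send_in_chunks(const uint8_t *data, size_t len,
                           void (*nrf_send)(const uint8_t *payload, size_t n))
{
    uint8_t chunk[NRF_CHUNK_SIZE];
    for (size_t off = 0; off < len; off += NRF_CHUNK_SIZE) {
        size_t n = (len - off < NRF_CHUNK_SIZE) ? (len - off) : NRF_CHUNK_SIZE;
        memset(chunk, 0, sizeof chunk);   /* zero-pad the final, short chunk */
        memcpy(chunk, data + off, n);
        nrf_send(chunk, NRF_CHUNK_SIZE);  /* always hand the radio exactly 4 bytes */
    }
}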
Dependencies: BufferedSerial
Dependents: rtos_basic
Fork of mbed-rtos
Revision 103:5a85840ab54e, committed 2016-02-18
- Committer: mbed_official
- Date: Thu Feb 18 09:45:27 2016 +0000
- Parent: 102:f62a48e9da94
- Child: 104:07314541bd12
- Commit message: Synchronized with git revision b57f7d56840134d072ca567460a86b77fb7adcf8
Full URL: https://github.com/mbedmicro/mbed/commit/b57f7d56840134d072ca567460a86b77fb7adcf8/
Adds support for exporting to the IAR toolchain.
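The diff below wires IAR DLib's system-lock hooks (__iar_system_Mtxinit, Mtxlock, Mtxunlock, Mtxdst) to a fixed pool of RTX mutexes, and adds IAR ports of the Cortex-A HAL assembly and SVC table. The following is a simplified, self-contained sketch of that locking scheme only; every identifier here (Rmtx, MUTEX_POOL_SIZE, rtx_mutex_*, system_mutex_*) is an illustrative stand-in for the real __iar_Rmtx, OS_MUTEXCNT and rt_mut_* names used in the actual code, which dispatches through SVCs instead of plain calls.

#include <stdint.h>

typedef void *Rmtx;                 /* stands in for IAR's __iar_Rmtx */
#define MUTEX_POOL_SIZE 8           /* stands in for OS_MUTEXCNT */

typedef struct {
    uint32_t mutex[4];              /* an RTX OS_MUT is four words */
    uint32_t used;                  /* 0 = free, 1 = handed out to the C library */
} MutexSlot;

static MutexSlot pool[MUTEX_POOL_SIZE];   /* zero-initialised static pool */

extern void rtx_mutex_init(void *m);      /* placeholders for rt_mut_init / rt_mut_wait / rt_mut_release */
extern void rtx_mutex_lock(void *m);
extern void rtx_mutex_unlock(void *m);

void system_mutex_init(Rmtx *m)           /* mirrors __iar_system_Mtxinit */
{
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        if (pool[i].used == 0) {
            pool[i].used = 1;
            *m = pool[i].mutex;           /* hand the slot back to the C library */
            rtx_mutex_init(*m);
            return;
        }
    }
    for (;;) ;                            /* pool exhausted: the real code also spins here, raise the count */
}

void system_mutex_lock(Rmtx *m)   { rtx_mutex_lock(*m); }    /* mirrors __iar_system_Mtxlock */
void system_mutex_unlock(Rmtx *m) { rtx_mutex_unlock(*m); }  /* mirrors __iar_system_Mtxunlock */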
Changed in this revision
--- a/rtx/TARGET_CORTEX_A/RTX_CM_lib.h Fri Feb 12 07:30:12 2016 +0000 +++ b/rtx/TARGET_CORTEX_A/RTX_CM_lib.h Thu Feb 18 09:45:27 2016 +0000 @@ -76,6 +76,62 @@ OS_RESULT _os_mut_release (uint32_t p, OS_ID mutex) __svc_indirect(0); OS_RESULT _os_mut_wait (uint32_t p, OS_ID mutex, uint16_t timeout) __svc_indirect(0); +#elif defined (__ICCARM__) + +typedef void *OS_ID; +typedef uint32_t OS_TID; +typedef uint32_t OS_MUT[4]; +typedef uint32_t OS_RESULT; + +#define runtask_id() rt_tsk_self() +#define mutex_init(m) rt_mut_init(m) +#define mutex_del(m) os_mut_delete(m) +#define mutex_wait(m) os_mut_wait(m,0xFFFF) +#define mutex_rel(m) os_mut_release(m) + +extern OS_TID rt_tsk_self (void); +extern void rt_mut_init (OS_ID mutex); +extern OS_RESULT rt_mut_delete (OS_ID mutex); +extern OS_RESULT rt_mut_release (OS_ID mutex); +extern OS_RESULT rt_mut_wait (OS_ID mutex, uint16_t timeout); + +#pragma swi_number=0 +__swi OS_RESULT _os_mut_delete (OS_ID mutex); + +static inline OS_RESULT os_mut_delete(OS_ID mutex) +{ + __asm("mov r12,%0\n" :: "r"(&rt_mut_delete) : "r12" ); + return _os_mut_delete(mutex); +} + +#pragma swi_number=0 +__swi OS_RESULT _os_mut_release (OS_ID mutex); + +static inline OS_RESULT os_mut_release(OS_ID mutex) +{ + __asm("mov r12,%0\n" :: "r"(&rt_mut_release) : "r12" ); + return _os_mut_release(mutex); +} + +#pragma swi_number=0 +__swi OS_RESULT _os_mut_wait (OS_ID mutex, uint16_t timeout); + +static inline OS_RESULT os_mut_wait(OS_ID mutex, uint16_t timeout) +{ + __asm("mov r12,%0\n" :: "r"(&rt_mut_wait) : "r12" ); + return _os_mut_wait(mutex, timeout); +} + +#include <yvals.h> /* for include DLib_Thread.h */ + +void __iar_system_Mtxinit(__iar_Rmtx *); +void __iar_system_Mtxdst(__iar_Rmtx *); +void __iar_system_Mtxlock(__iar_Rmtx *); +void __iar_system_Mtxunlock(__iar_Rmtx *); + + + + #endif @@ -174,6 +230,14 @@ static OS_MUT std_libmutex[OS_MUTEXCNT]; static uint32_t nr_mutex; extern void *__libspace_start; +#elif defined (__ICCARM__) +typedef struct os_mut_array { + OS_MUT mutex; + uint32_t used; +} os_mut_array_t; + +static os_mut_array_t std_libmutex[OS_MUTEXCNT];/* must be Zero clear */ +static uint32_t nr_mutex = 0; #endif @@ -247,6 +311,82 @@ } } +#elif defined (__ICCARM__) + +/*--------------------------- __iar_system_Mtxinit --------------------------*/ + +void __iar_system_Mtxinit(__iar_Rmtx *mutex) +{ + /* Allocate and initialize a system mutex. */ + int32_t idx; + + for (idx = 0; idx < OS_MUTEXCNT; idx++) + { + if (std_libmutex[idx].used == 0) + { + std_libmutex[idx].used = 1; + *mutex = &std_libmutex[idx].mutex; + nr_mutex++; + break; + } + } + if (nr_mutex >= OS_MUTEXCNT) + { + /* If you are here, you need to increase the number OS_MUTEXCNT. */ + for (;;); + } + + mutex_init (*mutex); +} + +/*--------------------------- __iar_system_Mtxdst ---------------------------*/ + +void __iar_system_Mtxdst(__iar_Rmtx *mutex) +{ + /* Free a system mutex. */ + int32_t idx; + + if (nr_mutex == 0) + { + for (;;); + } + + idx = ((((uint32_t)mutex) - ((uint32_t)&std_libmutex[0].mutex)) + / sizeof(os_mut_array_t)); + + if (idx >= OS_MUTEXCNT) + { + for (;;); + } + + mutex_del (*mutex); + std_libmutex[idx].used = 0; +} + +/*--------------------------- __iar_system_Mtxlock --------------------------*/ + +void __iar_system_Mtxlock(__iar_Rmtx *mutex) +{ + /* Acquire a system mutex, lock stdlib resources. */ + if (runtask_id ()) + { + /* RTX running, acquire a mutex. 
*/ + mutex_wait (*mutex); + } +} + +/*--------------------------- __iar_system_Mtxunlock ------------------------*/ + +void __iar_system_Mtxunlock(__iar_Rmtx *mutex) +{ + /* Release a system mutex, unlock stdlib resources. */ + if (runtask_id ()) + { + /* RTX running, release a mutex. */ + mutex_rel (*mutex); + } +} + #endif @@ -386,16 +526,11 @@ #elif defined (__ICCARM__) -extern int __low_level_init(void); -extern void __iar_data_init3(void); extern void exit(int arg); -__noreturn __stackless void __cmain(void) { +void mbed_main(void) { int a; - if (__low_level_init() != 0) { - __iar_data_init3(); - } osKernelInitialize(); osThreadCreate(&os_thread_def_main, NULL); a = osKernelStart();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rtx/TARGET_CORTEX_A/TOOLCHAIN_IAR/HAL_CA9.c Thu Feb 18 09:45:27 2016 +0000 @@ -0,0 +1,46 @@ +/*---------------------------------------------------------------------------- + * RL-ARM - RTX + *---------------------------------------------------------------------------- + * Name: HAL_CA9.c + * Purpose: Hardware Abstraction Layer for Cortex-A9 + * Rev.: 23 March 2015 + *---------------------------------------------------------------------------- + * + * Copyright (c) 2012 - 2015 ARM Limited + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * - Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + *---------------------------------------------------------------------------*/ + +//unsigned char seen_id0_active = 0; // single byte to hold a flag used in the workaround for GIC errata 733075 + + +/*---------------------------------------------------------------------------- + * Functions + *---------------------------------------------------------------------------*/ + +/* Functions move to HAL_CA9_asm.S */ + +/*---------------------------------------------------------------------------- + * end of file + *---------------------------------------------------------------------------*/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rtx/TARGET_CORTEX_A/TOOLCHAIN_IAR/HAL_CA9_asm.s Thu Feb 18 09:45:27 2016 +0000 @@ -0,0 +1,480 @@ +/*---------------------------------------------------------------------------- + * RL-ARM - RTX + *---------------------------------------------------------------------------- + * Name: HAL_CA9.c + * Purpose: Hardware Abstraction Layer for Cortex-A9 + * Rev.: 8 April 2015 + *---------------------------------------------------------------------------- + * + * Copyright (c) 2012 - 2015 ARM Limited + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * - Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *---------------------------------------------------------------------------*/ + + PUBLIC rt_set_PSP + PUBLIC rt_get_PSP + PUBLIC _alloc_box + PUBLIC _free_box + PUBLIC SWI_Handler + PUBLIC PendSV_Handler + PUBLIC OS_Tick_Handler + +/* macro defines form rt_HAL_CA.h */ +#define CPSR_T_BIT 0x20 +#define CPSR_I_BIT 0x80 +#define CPSR_F_BIT 0x40 + +#define MODE_USR 0x10 +#define MODE_FIQ 0x11 +#define MODE_IRQ 0x12 +#define MODE_SVC 0x13 +#define MODE_ABT 0x17 +#define MODE_UND 0x1B +#define MODE_SYS 0x1F + +/* macro defines form rt_TypeDef.h */ +#define TCB_TID 3 /* 'task id' offset */ +#define TCB_STACKF 37 /* 'stack_frame' offset */ +#ifndef __LARGE_PRIV_STACK +#define TCB_TSTACK 40 /* 'tsk_stack' offset */ +#else +#define TCB_TSTACK 44 /* 'tsk_stack' offset for LARGE_STACK */ +#endif + + + IMPORT rt_alloc_box + IMPORT rt_free_box + IMPORT os_tsk + IMPORT GICInterface_BASE + IMPORT rt_pop_req + IMPORT os_tick_irqack + IMPORT rt_systick + + SECTION `.text`:CODE:ROOT(2) + +/*---------------------------------------------------------------------------- + * Functions + *---------------------------------------------------------------------------*/ + +//For A-class, set USR/SYS stack +//__asm void rt_set_PSP (U32 stack) { +rt_set_PSP: + ARM + + MRS R1, CPSR + CPS #MODE_SYS ;no effect in USR mode + ISB + MOV SP, R0 + MSR CPSR_c, R1 ;no effect in USR mode + ISB + BX LR + +//} + +//For A-class, get USR/SYS stack +//__asm U32 rt_get_PSP (void) { +rt_get_PSP: + ARM + + MRS R1, CPSR + CPS #MODE_SYS ;no effect in USR mode + ISB + MOV R0, SP + MSR CPSR_c, R1 ;no effect in USR mode + ISB + BX LR +//} + +/*--------------------------- _alloc_box ------------------------------------*/ +//__asm void *_alloc_box (void *box_mem) { +_alloc_box: + /* Function wrapper for Unprivileged/Privileged mode. */ + ARM + + LDR R12,=(rt_alloc_box) + MRS R2, CPSR + LSLS R2, R2,#28 + BXNE R12 + SVC 0 + BX LR +//} + + +/*--------------------------- _free_box -------------------------------------*/ +//__asm int _free_box (void *box_mem, void *box) { +_free_box: + /* Function wrapper for Unprivileged/Privileged mode. */ + + LDR R12,=(rt_free_box) + MRS R2, CPSR + LSLS R2, R2,#28 + BXNE R12 + SVC 0 + BX LR + +//} + +/*-------------------------- SWI_Handler -----------------------------------*/ + +//#pragma push +//#pragma arm +//__asm void SWI_Handler (void) { +SWI_Handler: + PRESERVE8 + ARM + + IMPORT rt_tsk_lock + IMPORT rt_tsk_unlock + IMPORT SVC_Count + IMPORT SVC_Table + IMPORT rt_stk_check + IMPORT FPUEnable + IMPORT scheduler_suspended ; flag set by rt_suspend, cleared by rt_resume, read by SWI_Handler + +Mode_SVC EQU 0x13 + + SRSDB #Mode_SVC! ; Push LR_SVC and SPRS_SVC onto SVC mode stack + STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp + + MRS R4,SPSR ; Get SPSR + TST R4,#CPSR_T_BIT ; Check Thumb Bit + LDRNEH R4,[LR,#-2] ; Thumb: Load Halfword + BICNE R4,R4,#0xFF00 ; Extract SVC Number + LDREQ R4,[LR,#-4] ; ARM: Load Word + BICEQ R4,R4,#0xFF000000 ; Extract SVC Number + + /* Lock out systick and re-enable interrupts */ + STMDB SP!,{R0-R3,R12,LR} + + AND R12, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R12 ; Adjust stack + STMDB SP!,{R12, LR} ; Store stack adjustment and dummy LR to SVC stack + + BLX rt_tsk_lock + CPSIE i + + LDMIA SP!,{R12,LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R12 ; Unadjust stack + + LDMIA SP!,{R0-R3,R12,LR} + + CMP R4,#0 + BNE SVC_User + + MRS R4,SPSR + STR R4,[SP,#-0x4]! 
; Push R4 so we can use it as a temp + AND R4, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R4 ; Adjust stack + STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR + BLX R12 + LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R4 ; Unadjust stack + LDR R4,[SP],#0x4 ; Restore R4 + MSR SPSR_CXSF,R4 + + /* Here we will be in SVC mode (even if coming in from PendSV_Handler or OS_Tick_Handler) */ +Sys_Switch: + LDR LR,=(os_tsk) + LDMIA LR,{R4,LR} ; os_tsk.run, os_tsk.new + CMP R4,LR + BNE switching + + STMDB SP!,{R0-R3,R12,LR} + + AND R12, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R12 ; Adjust stack + STMDB SP!,{R12,LR} ; Store stack adjustment and dummy LR to SVC stack + + CPSID i + ; Do not unlock scheduler if it has just been suspended by rt_suspend() + LDR R1,=scheduler_suspended + LDRB R0, [R1] + CMP R0, #1 + BEQ dont_unlock + BLX rt_tsk_unlock +dont_unlock: + + LDMIA SP!,{R12,LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R12 ; Unadjust stack + + LDMIA SP!,{R0-R3,R12,LR} + LDR R4,[SP],#0x4 + RFEFD SP! ; Return from exception, no task switch + +switching: + CLREX + CMP R4,#0 + ADDEQ SP,SP,#12 ; Original R4, LR & SPSR do not need to be popped when we are paging in a different task + BEQ SVC_Next ; Runtask deleted? + + + STMDB SP!,{R8-R11} //R4 and LR already stacked + MOV R10,R4 ; Preserve os_tsk.run + MOV R11,LR ; Preserve os_tsk.new + + ADD R8,SP,#16 ; Unstack R4,LR + LDMIA R8,{R4,LR} + + SUB SP,SP,#4 ; Make space on the stack for the next instn + STMIA SP,{SP}^ ; Put User SP onto stack + LDR R8,[SP],#0x4 ; Pop User SP into R8 + + MRS R9,SPSR + STMDB R8!,{R9} ; User CPSR + STMDB R8!,{LR} ; User PC + STMDB R8,{LR}^ ; User LR + SUB R8,R8,#4 ; No writeback for store of User LR + STMDB R8!,{R0-R3,R12} ; User R0-R3,R12 + MOV R3,R10 ; os_tsk.run + MOV LR,R11 ; os_tsk.new + LDMIA SP!,{R9-R12} + ADD SP,SP,#12 ; Fix up SP for unstack of R4, LR & SPSR + STMDB R8!,{R4-R7,R9-R12} ; User R4-R11 + + //If applicable, stack VFP/NEON state + MRC p15,0,R1,c1,c0,2 ; VFP/NEON access enabled? (CPACR) + AND R2,R1,#0x00F00000 + CMP R2,#0x00F00000 + BNE no_outgoing_vfp + VMRS R2,FPSCR + STMDB R8!,{R2,R4} ; Push FPSCR, maintain 8-byte alignment + //IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32 + VSTMDB R8!,{D0-D15} + VSTMDB R8!,{D16-D31} + LDRB R2,[R3,#TCB_STACKF] ; Record in TCB that NEON/D32 state is stacked + ORR R2,R2,#4 + STRB R2,[R3,#TCB_STACKF] + //ENDIF + +no_outgoing_vfp: + STR R8,[R3,#TCB_TSTACK] + MOV R4,LR + + STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp + AND R4, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R4 ; Adjust stack + STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR to SVC stack + + BLX rt_stk_check + + LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R4 ; Unadjust stack + LDR R4,[SP],#0x4 ; Restore R4 + + MOV LR,R4 + +SVC_Next: //R4 == os_tsk.run, LR == os_tsk.new, R0-R3, R5-R12 corruptible + LDR R1,=(os_tsk) ; os_tsk.run = os_tsk.new + STR LR,[R1] + LDRB R1,[LR,#TCB_TID] ; os_tsk.run->task_id + LSL R1,R1,#8 ; Store PROCID + MCR p15,0,R1,c13,c0,1 ; Write CONTEXTIDR + + LDR R0,[LR,#TCB_TSTACK] ; os_tsk.run->tsk_stack + + //Does incoming task have VFP/NEON state in stack? 
+ LDRB R3,[LR,#TCB_STACKF] + ANDS R3, R3, #0x6 + MRC p15,0,R1,c1,c0,2 ; Read CPACR + BICEQ R1,R1,#0x00F00000 ; Disable VFP/NEON access if incoming task does not have stacked VFP/NEON state + ORRNE R1,R1,#0x00F00000 ; Enable VFP/NEON access if incoming task does have stacked VFP/NEON state + MCR p15,0,R1,c1,c0,2 ; Write CPACR + BEQ no_incoming_vfp + ISB ; We only need the sync if we enabled, otherwise we will context switch before next VFP/NEON instruction anyway + //IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32 + VLDMIA R0!,{D16-D31} + //ENDIF + VLDMIA R0!,{D0-D15} + LDR R2,[R0] + VMSR FPSCR,R2 + ADD R0,R0,#8 + +no_incoming_vfp: + LDR R1,[R0,#60] ; Restore User CPSR + MSR SPSR_CXSF,R1 + LDMIA R0!,{R4-R11} ; Restore User R4-R11 + ADD R0,R0,#4 ; Restore User R1-R3,R12 + LDMIA R0!,{R1-R3,R12} + LDMIA R0,{LR}^ ; Restore User LR + ADD R0,R0,#4 ; No writeback for load to user LR + LDMIA R0!,{LR} ; Restore User PC + ADD R0,R0,#4 ; Correct User SP for unstacked user CPSR + + STR R0,[SP,#-0x4]! ; Push R0 onto stack + LDMIA SP,{SP}^ ; Get R0 off stack into User SP + ADD SP,SP,#4 ; Put SP back + + LDR R0,[R0,#-32] ; Restore R0 + + STMDB SP!,{R0-R3,R12,LR} + + AND R12, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R12 ; Adjust stack + STMDB sp!,{R12, LR} ; Store stack adjustment and dummy LR to SVC stack + + CPSID i + BLX rt_tsk_unlock + + LDMIA sp!,{R12, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R12 ; Unadjust stack + + LDMIA SP!,{R0-R3,R12,LR} + + MOVS PC,LR ; Return from exception + + + /*------------------- User SVC -------------------------------*/ + +SVC_User: + LDR R12,=SVC_Count + LDR R12,[R12] + CMP R4,R12 ; Check for overflow + BHI SVC_Done + + LDR R12,=SVC_Table-4 + LDR R12,[R12,R4,LSL #2] ; Load SVC Function Address + MRS R4,SPSR ; Save SPSR + STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp + AND R4, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R4 ; Adjust stack + STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR + BLX R12 ; Call SVC Function + LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R4 ; Unadjust stack + LDR R4,[SP],#0x4 ; Restore R4 + MSR SPSR_CXSF,R4 ; Restore SPSR + +SVC_Done: + STMDB sp!,{R0-R3,R12,LR} + + STR R4,[sp,#-0x4]! ; Push R4 so we can use it as a temp + AND R4, SP, #4 ; Ensure stack is 8-byte aligned + SUB SP, SP, R4 ; Adjust stack + STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR + + CPSID i + BLX rt_tsk_unlock + + LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R4 ; Unadjust stack + LDR R4,[SP],#0x4 ; Restore R4 + + LDMIA SP!,{R0-R3,R12,LR} + LDR R4,[SP],#0x4 + RFEFD SP! ; Return from exception +//} +//#pragma pop + +//#pragma push +//#pragma arm +//__asm void PendSV_Handler (U32 IRQn) { +PendSV_Handler: + ARM + + IMPORT rt_tsk_lock + IMPORT IRQNestLevel ; Flag indicates whether inside an ISR, and the depth of nesting. 0 = not in ISR. + IMPORT seen_id0_active ; Flag used to workaround GIC 390 errata 733075 - set in startup_Renesas_RZ_A1.s + + ADD SP,SP,#8 //fix up stack pointer (R0 has been pushed and will never be popped, R1 was pushed for stack alignment) + + //Disable systick interrupts, then write EOIR. We want interrupts disabled before we enter the context switcher. 
+ STMDB SP!,{R0, R1} + BLX rt_tsk_lock + LDMIA SP!,{R0, R1} + LDR R1,=(GICInterface_BASE) + LDR R1, [R1, #0] + STR R0, [R1, #0x10] + + ; If it was interrupt ID0, clear the seen flag, otherwise return as normal + CMP R0, #0 + LDREQ R1, =seen_id0_active + STRBEQ R0, [R1] ; Clear the seen flag, using R0 (which is 0), to save loading another register + + LDR R0, =IRQNestLevel ; Get address of nesting counter + LDR R1, [R0] + SUB R1, R1, #1 ; Decrement nesting counter + STR R1, [R0] + + BLX (rt_pop_req) + + LDMIA SP!,{R1, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R1 ; Unadjust stack + + LDR R0,[SP,#24] + MSR SPSR_CXSF,R0 + LDMIA SP!,{R0-R3,R12} ; Leave SPSR & LR on the stack + STR R4,[SP,#-0x4]! + B Sys_Switch +//} +//#pragma pop + + +//#pragma push +//#pragma arm +//__asm void OS_Tick_Handler (U32 IRQn) { +OS_Tick_Handler: + ARM + + IMPORT rt_tsk_lock + IMPORT IRQNestLevel ; Flag indicates whether inside an ISR, and the depth of nesting. 0 = not in ISR. + IMPORT seen_id0_active ; Flag used to workaround GIC 390 errata 733075 - set in startup_Renesas_RZ_A1.s + + ADD SP,SP,#8 //fix up stack pointer (R0 has been pushed and will never be popped, R1 was pushed for stack alignment) + + STMDB SP!,{R0, R1} + BLX rt_tsk_lock + LDMIA SP!,{R0, R1} + LDR R1, =(GICInterface_BASE) + LDR R1, [R1, #0] + STR R0, [R1, #0x10] + + ; If it was interrupt ID0, clear the seen flag, otherwise return as normal + CMP R0, #0 + LDREQ R1, =seen_id0_active + STRBEQ R0, [R1] ; Clear the seen flag, using R0 (which is 0), to save loading another register + + LDR R0, =IRQNestLevel ; Get address of nesting counter + LDR R1, [R0] + SUB R1, R1, #1 ; Decrement nesting counter + STR R1, [R0] + + BLX (os_tick_irqack) + BLX (rt_systick) + + LDMIA SP!,{R1, LR} ; Get stack adjustment & discard dummy LR + ADD SP, SP, R1 ; Unadjust stack + + LDR R0,[SP,#24] + MSR SPSR_CXSF,R0 + LDMIA SP!,{R0-R3,R12} ; Leave SPSR & LR on the stack + STR R4,[SP,#-0x4]! + B Sys_Switch +//} +//#pragma pop + + + END +/*---------------------------------------------------------------------------- + * end of file + *---------------------------------------------------------------------------*/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rtx/TARGET_CORTEX_A/TOOLCHAIN_IAR/SVC_Table.S Thu Feb 18 09:45:27 2016 +0000 @@ -0,0 +1,57 @@ +;/*---------------------------------------------------------------------------- +; * RL-ARM - RTX +; *---------------------------------------------------------------------------- +; * Name: SVC_TABLE.S +; * Purpose: Pre-defined SVC Table for Cortex-M +; * Rev.: V4.70 +; *---------------------------------------------------------------------------- +; * +; * Copyright (c) 1999-2009 KEIL, 2009-2013 ARM Germany GmbH +; * All rights reserved. +; * Redistribution and use in source and binary forms, with or without +; * modification, are permitted provided that the following conditions are met: +; * - Redistributions of source code must retain the above copyright +; * notice, this list of conditions and the following disclaimer. +; * - Redistributions in binary form must reproduce the above copyright +; * notice, this list of conditions and the following disclaimer in the +; * documentation and/or other materials provided with the distribution. +; * - Neither the name of ARM nor the names of its contributors may be used +; * to endorse or promote products derived from this software without +; * specific prior written permission. +; * +; * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE +; * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; * POSSIBILITY OF SUCH DAMAGE. +; *---------------------------------------------------------------------------*/ + + + SECTION SVC_TABLE:CODE:ROOT(2) + + EXPORT SVC_Count + +SVC_Cnt EQU (SVC_End-SVC_Table)/4 +SVC_Count DCD SVC_Cnt + +; Import user SVC functions here. +; IMPORT __SVC_1 + + EXPORT SVC_Table +SVC_Table +; Insert user SVC functions here. SVC 0 used by RTL Kernel. +; DCD __SVC_1 ; InitMemorySubsystem + +SVC_End + + END + +/*---------------------------------------------------------------------------- + * end of file + *---------------------------------------------------------------------------*/
--- a/rtx/TARGET_CORTEX_A/cmsis_os.h Fri Feb 12 07:30:12 2016 +0000
+++ b/rtx/TARGET_CORTEX_A/cmsis_os.h Thu Feb 18 09:45:27 2016 +0000
@@ -143,7 +143,7 @@
 #define CMSIS_OS_RTX_CA /* new define for Coretex-A */
 
 // The stack space occupied is mainly dependent on the underling C standard library
-#if defined(TOOLCHAIN_GCC) || defined(TOOLCHAIN_ARM_STD)
+#if defined(TOOLCHAIN_GCC) || defined(TOOLCHAIN_ARM_STD) || defined(TOOLCHAIN_IAR)
 # define WORDS_STACK_SIZE 512
 #elif defined(TOOLCHAIN_ARM_MICRO)
 # define WORDS_STACK_SIZE 128
--- a/rtx/TARGET_CORTEX_A/rt_HAL_CA.h Fri Feb 12 07:30:12 2016 +0000 +++ b/rtx/TARGET_CORTEX_A/rt_HAL_CA.h Thu Feb 18 09:45:27 2016 +0000 @@ -75,8 +75,6 @@ #elif defined (__ICCARM__) /* IAR Compiler */ -#error IAR Compiler support not implemented for Cortex-A - #endif static U8 priority = 0xff; @@ -99,6 +97,15 @@ #define SGI_PENDSV_BIT ((U32)(1 << (SGI_PENDSV & 0xf))) //Increase priority filter to prevent timer and PendSV interrupts signaling. Guarantees that interrupts will not be forwarded. +#if defined (__ICCARM__) +#define OS_LOCK() int irq_dis = __disable_irq_iar();\ + priority = GICI_ICCPMR; \ + GICI_ICCPMR = 0xff; \ + GICI_ICCPMR = GICI_ICCPMR - 1; \ + __DSB();\ + if(!irq_dis) __enable_irq(); \ + +#else #define OS_LOCK() int irq_dis = __disable_irq();\ priority = GICI_ICCPMR; \ GICI_ICCPMR = 0xff; \ @@ -106,6 +113,8 @@ __DSB();\ if(!irq_dis) __enable_irq(); \ +#endif + //Restore priority filter. Re-enable timer and PendSV signaling #define OS_UNLOCK() __DSB(); \ GICI_ICCPMR = priority; \ @@ -134,9 +143,14 @@ #define rt_inc(p) while(__strex((__ldrex(p)+1),p)) #define rt_dec(p) while(__strex((__ldrex(p)-1),p)) #else +#if defined (__ICCARM__) + #define rt_inc(p) { int irq_dis = __disable_irq_iar();(*p)++;if(!irq_dis) __enable_irq(); } + #define rt_dec(p) { int irq_dis = __disable_irq_iar();(*p)--;if(!irq_dis) __enable_irq(); } +#else #define rt_inc(p) { int irq_dis = __disable_irq();(*p)++;if(!irq_dis) __enable_irq(); } #define rt_dec(p) { int irq_dis = __disable_irq();(*p)--;if(!irq_dis) __enable_irq(); } -#endif +#endif /* __ICCARM__ */ +#endif /* __USE_EXCLUSIVE_ACCESS */ __inline static U32 rt_inc_qi (U32 size, U8 *count, U8 *first) { U32 cnt,c2; @@ -152,7 +166,11 @@ } while (__strex(c2, first)); #else int irq_dis; + #if defined (__ICCARM__) + irq_dis = __disable_irq_iar(); + #else irq_dis = __disable_irq(); + #endif /* __ICCARM__ */ if ((cnt = *count) < size) { *count = cnt+1; c2 = (cnt = *first) + 1;
--- a/rtx/TARGET_CORTEX_A/rt_MemBox.c Fri Feb 12 07:30:12 2016 +0000 +++ b/rtx/TARGET_CORTEX_A/rt_MemBox.c Thu Feb 18 09:45:27 2016 +0000 @@ -101,7 +101,12 @@ #ifndef __USE_EXCLUSIVE_ACCESS int irq_dis; + +#if defined (__ICCARM__) + irq_dis = __disable_irq_iar(); +#else irq_dis = __disable_irq (); +#endif /* __ICCARM__ */ free = ((P_BM) box_mem)->free; if (free) { ((P_BM) box_mem)->free = *free; @@ -152,7 +157,11 @@ } #ifndef __USE_EXCLUSIVE_ACCESS +#if defined (__ICCARM__) + irq_dis = __disable_irq_iar(); +#else irq_dis = __disable_irq (); +#endif /* __ICCARM__ */ *((void **)box) = ((P_BM) box_mem)->free; ((P_BM) box_mem)->free = box; if (!irq_dis) __enable_irq ();
--- a/rtx/TARGET_CORTEX_A/rt_TypeDef.h Fri Feb 12 07:30:12 2016 +0000 +++ b/rtx/TARGET_CORTEX_A/rt_TypeDef.h Thu Feb 18 09:45:27 2016 +0000 @@ -69,8 +69,17 @@ /* Hardware dependant part: specific for Cortex processor */ U8 stack_frame; /* Stack frame: 0x0 Basic, 0x1 Extended, 0x2 VFP/D16 stacked, 0x4 NEON/D32 stacked */ +#if defined (__ICCARM__) +#ifndef __LARGE_PRIV_STACK + U16 priv_stack; /* Private stack size, 0= system assigned */ +#else U16 reserved; /* Reserved (padding) */ U32 priv_stack; /* Private stack size for LARGE_STACK, 0= system assigned */ +#endif /* __LARGE_PRIV_STACK */ +#else + U16 reserved; /* Reserved (padding) */ + U32 priv_stack; /* Private stack size for LARGE_STACK, 0= system assigned */ +#endif U32 tsk_stack; /* Current task Stack pointer (R13) */ U32 *stack; /* Pointer to Task Stack memory block */ @@ -79,7 +88,15 @@ } *P_TCB; #define TCB_TID 3 /* 'task id' offset */ #define TCB_STACKF 37 /* 'stack_frame' offset */ +#if defined (__ICCARM__) +#ifndef __LARGE_PRIV_STACK +#define TCB_TSTACK 40 /* 'tsk_stack' offset */ +#else #define TCB_TSTACK 44 /* 'tsk_stack' offset for LARGE_STACK */ +#endif /* __LARGE_PRIV_STACK */ +#else +#define TCB_TSTACK 44 /* 'tsk_stack' offset for LARGE_STACK */ +#endif typedef struct OS_PSFE { /* Post Service Fifo Entry */ void *id; /* Object Identification */