Important changes to repositories hosted on mbed.com
Mbed hosted mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software download the repository Zip archive or clone locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Fork of mbed-dev by
Revision 66:fdb3f9f9a72f, committed 2016-02-18
- Committer:
- mbed_official
- Date:
- Thu Feb 18 09:45:10 2016 +0000
- Parent:
- 65:60c7569a3925
- Child:
- 67:4bcbbb9fcddf
- Commit message:
- Synchronized with git revision b57f7d56840134d072ca567460a86b77fb7adcf8
Full URL: https://github.com/mbedmicro/mbed/commit/b57f7d56840134d072ca567460a86b77fb7adcf8/
Support of export function to the IAR.
Changed in this revision
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/TOOLCHAIN_IAR/MBRZA1H.icf Thu Feb 18 09:45:10 2016 +0000
@@ -0,0 +1,62 @@
+/*###ICF### Section handled by ICF editor, don't touch! ****/
+/*-Editor annotation file-*/
+/* IcfEditorFile="$TOOLKIT_DIR$\config\ide\IcfEditor\a_v1_0.xml" */
+/*-Specials-*/
+define symbol __ICFEDIT_intvec_start__ = 0x18004000;
+/*-Memory Regions-*/
+define symbol __ICFEDIT_region_ROM_start__ = 0x18000000;
+define symbol __ICFEDIT_region_ROM_end__ = 0x187FFFFF;
+define symbol __ICFEDIT_region_TTB_start__ = 0x20000000;
+define symbol __ICFEDIT_region_TTB_end__ = 0x2001FFFF;
+define symbol __ICFEDIT_region_RAM_start__ = 0x20020000;
+define symbol __ICFEDIT_region_RAM_end__ = 0x209FFFFF;
+
+/*-Sizes-*/
+define symbol __ICFEDIT_size_cstack__ = 0x00004000;
+define symbol __ICFEDIT_size_svcstack__ = 0x00008000;
+define symbol __ICFEDIT_size_irqstack__ = 0x00008000;
+define symbol __ICFEDIT_size_fiqstack__ = 0x00000100;
+define symbol __ICFEDIT_size_undstack__ = 0x00000100;
+define symbol __ICFEDIT_size_abtstack__ = 0x00000100;
+define symbol __ICFEDIT_size_heap__ = 0x00080000;
+/**** End of ICF editor section. ###ICF###*/
+
+define symbol __ICFEDIT_region_RetRAM_start__ = 0x20000000;
+define symbol __ICFEDIT_region_RetRAM_end__ = 0x2001FFFF;
+
+define symbol __ICFEDIT_region_MirrorRAM_start__ = 0x60900000;
+define symbol __ICFEDIT_region_MirrorRAM_end__ = 0x609FFFFF;
+
+define symbol __ICFEDIT_region_MirrorRetRAM_start__ = 0x60000000;
+define symbol __ICFEDIT_region_MirrorRetRAM_end__ = 0x6001FFFF;
+
+define memory mem with size = 4G;
+
+define region ROM_region = mem:[from __ICFEDIT_region_ROM_start__ to __ICFEDIT_region_ROM_end__];
+define region RAM_region = mem:[from __ICFEDIT_region_RAM_start__ to __ICFEDIT_region_RAM_end__];
+define region RetRAM_region = mem:[from __ICFEDIT_region_RetRAM_start__ to __ICFEDIT_region_RetRAM_end__];
+define region MirrorRAM_region = mem:[from __ICFEDIT_region_MirrorRAM_start__ to __ICFEDIT_region_MirrorRAM_end__];
+define region MirrorRetRAM_region = mem:[from __ICFEDIT_region_MirrorRetRAM_start__ to __ICFEDIT_region_MirrorRetRAM_end__];
+
+define block CSTACK with alignment = 8, size = __ICFEDIT_size_cstack__ { };
+define block SVC_STACK with alignment = 8, size = __ICFEDIT_size_svcstack__ { };
+define block IRQ_STACK with alignment = 8, size = __ICFEDIT_size_irqstack__ { };
+define block FIQ_STACK with alignment = 8, size = __ICFEDIT_size_fiqstack__ { };
+define block UND_STACK with alignment = 8, size = __ICFEDIT_size_undstack__ { };
+define block ABT_STACK with alignment = 8, size = __ICFEDIT_size_abtstack__ { };
+define block HEAP with alignment = 8, size = __ICFEDIT_size_heap__ { };
+
+initialize by copy { readwrite };
+do not initialize { section .noinit };
+do not initialize { section MMU_TT };
+
+place at address mem:__ICFEDIT_intvec_start__ { readonly section .intvec };
+
+place in ROM_region { readonly };
+place in RAM_region { readwrite,
+ block CSTACK, block SVC_STACK, block IRQ_STACK, block FIQ_STACK,
+ block UND_STACK, block ABT_STACK, block HEAP };
+
+place in RetRAM_region { section .retram };
+place in MirrorRAM_region { section .mirrorram };
+place in MirrorRetRAM_region { section .mirrorretram };
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/TOOLCHAIN_IAR/startup_RZA1H.s Thu Feb 18 09:45:10 2016 +0000
@@ -0,0 +1,505 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Part one of the system initialization code,
+;; contains low-level
+;; initialization.
+;;
+;; Copyright 2007 IAR Systems. All rights reserved.
+;;
+;; $Revision: 49919 $
+;;
+
+ MODULE ?cstartup
+
+ ;; Forward declaration of sections.
+ SECTION SVC_STACK:DATA:NOROOT(3)
+ SECTION IRQ_STACK:DATA:NOROOT(3)
+ SECTION ABT_STACK:DATA:NOROOT(3)
+ SECTION FIQ_STACK:DATA:NOROOT(3)
+ SECTION UND_STACK:DATA:NOROOT(3)
+ SECTION CSTACK:DATA:NOROOT(3)
+
+;
+; The modules in this file are included in the libraries, and may be
+; replaced by any user-defined modules that define the PUBLIC symbol
+; __iar_program_start or a user defined start symbol.
+;
+; To override the cstartup defined in the library, simply add your
+; modified version to the workbench project.
+
+ SECTION .intvec:CODE:NOROOT(2)
+
+ PUBLIC __vector
+ PUBLIC __iar_program_start
+ PUBLIC Undefined_Handler
+ EXTERN SWI_Handler
+ PUBLIC Prefetch_Handler
+ PUBLIC Abort_Handler
+ PUBLIC IRQ_Handler
+ PUBLIC FIQ_Handler
+ EXTERN VbarInit
+ EXTERN SetLowVectors
+ EXTERN init_TTB
+ EXTERN enable_mmu
+ EXTERN Peripheral_BasicInit
+ EXTERN initsct
+ EXTERN PowerON_Reset
+ PUBLIC FPUEnable
+
+
+ DATA
+
+__iar_init$$done: ; The vector table is not needed
+ ; until after copy initialization is done
+
+__vector: ; Make this a DATA label, so that stack usage
+                    ; analysis doesn't consider it an uncalled function
+
+ ARM
+
+ ; All default exception handlers (except reset) are
+ ; defined as weak symbol definitions.
+ ; If a handler is defined by the application it will take precedence.
+ LDR PC,Reset_Addr ; Reset
+ LDR PC,Undefined_Addr ; Undefined instructions
+ LDR PC,SWI_Addr ; Software interrupt (SWI/SVC)
+ LDR PC,Prefetch_Addr ; Prefetch abort
+ LDR PC,Abort_Addr ; Data abort
+ DCD 0 ; RESERVED
+ LDR PC,IRQ_Addr ; IRQ
+ LDR PC,FIQ_Addr ; FIQ
+
+ DATA
+
+Reset_Addr: DCD __iar_program_start
+Undefined_Addr: DCD Undefined_Handler
+SWI_Addr: DCD SWI_Handler
+Prefetch_Addr: DCD Prefetch_Handler
+Abort_Addr: DCD Abort_Handler
+IRQ_Addr: DCD IRQ_Handler
+FIQ_Addr: DCD FIQ_Handler
+
+
+; --------------------------------------------------
+; ?cstartup -- low-level system initialization code.
+;
+; After a reset execution starts here, the mode is ARM, supervisor
+; with interrupts disabled.
+;
+
+
+
+ SECTION .text:CODE:NOROOT(2)
+ EXTERN RZ_A1_SetSramWriteEnable
+ EXTERN create_translation_table
+ EXTERN SystemInit
+ EXTERN InitMemorySubsystem
+ EXTERN __cmain
+ REQUIRE __vector
+ EXTWEAK __iar_init_core
+ EXTWEAK __iar_init_vfp
+
+
+ ARM
+
+__iar_program_start:
+?cstartup:
+
+
+;;; @ Put any cores other than 0 to sleep
+ mrc p15, 0, r0, c0, c0, 5 ;;; @ Read MPIDR
+ ands r0, r0, #3
+
+goToSleep:
+ wfine
+ bne goToSleep
+
+
+//@ Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
+//@ Enables Full Access i.e. in both privileged and non privileged modes
+ mrc p15, 0, r0, c1, c0, 2 ;@ Read Coprocessor Access Control Register (CPACR)
+ orr r0, r0, #(0xF << 20) ;@ Enable access to CP 10 & 11
+ mcr p15, 0, r0, c1, c0, 2 ;@ Write Coprocessor Access Control Register (CPACR)
+ isb
+
+
+;; Switch on the VFP and NEON hardware
+ mov r0, #0x40000000
+ vmsr fpexc, r0 ;@ Write FPEXC register, EN bit set
+
+ mrc p15, 0, r0, c1, c0, 0 ;@ Read CP15 System Control register
+ bic r0, r0, #(0x1 << 12) ;@ Clear I bit 12 to disable I Cache
+ bic r0, r0, #(0x1 << 2) ;@ Clear C bit 2 to disable D Cache
+ bic r0, r0, #0x1 ;@ Clear M bit 0 to disable MMU
+ bic r0, r0, #(0x1 << 11) ;@ Clear Z bit 11 to disable branch prediction
+ bic r0, r0, #(0x1 << 13) ;@ Clear V bit 13 to disable hivecs
+ mcr p15, 0, r0, c1, c0, 0 ;@ Write value back to CP15 System Control register
+ isb
+
+
+;; Set Vector Base Address Register (VBAR) to point to this application's vector table
+ ldr r0, =__vector
+ mcr p15, 0, r0, c12, c0, 0
+
+
+;
+; Add initialization needed before setup of stackpointers here.
+;
+
+;
+; Initialize the stack pointers.
+; The pattern below can be used for any of the exception stacks:
+; FIQ, IRQ, SVC, ABT, UND, SYS.
+; The USR mode uses the same stack as SYS.
+; The stack segments must be defined in the linker command file,
+; and be declared above.
+;
+
+
+; --------------------
+; Mode, corresponds to bits 0-5 in CPSR
+
+#define MODE_MSK 0x1F ; Bit mask for mode bits in CPSR
+
+#define USR_MODE 0x10 ; User mode
+#define FIQ_MODE 0x11 ; Fast Interrupt Request mode
+#define IRQ_MODE 0x12 ; Interrupt Request mode
+#define SVC_MODE 0x13 ; Supervisor mode
+#define ABT_MODE 0x17 ; Abort mode
+#define UND_MODE 0x1B ; Undefined Instruction mode
+#define SYS_MODE 0x1F ; System mode
+
+#define Mode_SVC 0x13
+#define Mode_ABT 0x17
+#define Mode_UND 0x1B
+#define GICI_BASE 0xe8202000
+#define ICCIAR_OFFSET 0x0000000C
+#define ICCEOIR_OFFSET 0x00000010
+#define ICCHPIR_OFFSET 0x00000018
+#define GICD_BASE 0xe8201000
+#define GIC_ERRATA_CHECK_1 0x000003FE
+#define GIC_ERRATA_CHECK_2 0x000003FF
+#define ICDABR0_OFFSET 0x00000300
+#define ICDIPR0_OFFSET 0x00000400
+#define T_Bit 0x20 ; when T bit is set, core is in Thumb state
+
+ MRS r0, cpsr ; Original PSR value
+
+ ;; Set up the SVC stack pointer.
+ BIC r0, r0, #MODE_MSK ; Clear the mode bits
+ ORR r0, r0, #SVC_MODE ; Set SVC mode bits
+ MSR cpsr_c, r0 ; Change the mode
+ LDR sp, =SFE(SVC_STACK) ; End of SVC_STACK
+ BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
+
+ ;; Set up the interrupt stack pointer.
+
+ BIC r0, r0, #MODE_MSK ; Clear the mode bits
+ ORR r0, r0, #IRQ_MODE ; Set IRQ mode bits
+ MSR cpsr_c, r0 ; Change the mode
+ LDR sp, =SFE(IRQ_STACK) ; End of IRQ_STACK
+ BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
+
+ ;; Set up the fast interrupt stack pointer.
+
+ BIC r0, r0, #MODE_MSK ; Clear the mode bits
+    ORR     r0, r0, #FIQ_MODE       ; Set FIQ mode bits
+ MSR cpsr_c, r0 ; Change the mode
+ LDR sp, =SFE(FIQ_STACK) ; End of FIQ_STACK
+ BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
+
+
+ ;; Set up the ABT stack pointer.
+
+    BIC     r0 ,r0, #MODE_MSK       ; Clear the mode bits
+    ORR     r0 ,r0, #ABT_MODE       ; Set ABT mode bits
+    MSR     cpsr_c, r0              ; Change the mode
+    LDR     sp, =SFE(ABT_STACK)     ; End of ABT_STACK
+ BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
+
+
+    ;; Set up the UND stack pointer.
+
+    BIC     r0 ,r0, #MODE_MSK       ; Clear the mode bits
+    ORR     r0 ,r0, #UND_MODE       ; Set UND mode bits
+    MSR     cpsr_c, r0              ; Change the mode
+    LDR     sp, =SFE(UND_STACK)     ; End of UND_STACK
+ BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
+
+ ;; Set up the normal stack pointer.
+
+ BIC r0 ,r0, #MODE_MSK ; Clear the mode bits
+ ORR r0 ,r0, #SYS_MODE ; Set System mode bits
+ MSR cpsr_c, r0 ; Change the mode
+ LDR sp, =SFE(CSTACK) ; End of CSTACK
+ BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
+
+;;;
+
+ isb
+ ldr r0, =RZ_A1_SetSramWriteEnable
+ blx r0
+
+ bl create_translation_table
+
+; USR/SYS stack pointer will be set during kernel init
+ ldr r0, =SystemInit
+ blx r0
+ ldr r0, =InitMemorySubsystem
+ blx r0
+
+; fp_init
+ mov r0, #0x3000000
+ vmsr fpscr, r0
+
+
+
+;;; Continue to __cmain for C-level initialization.
+
+ FUNCALL __iar_program_start, __cmain
+ B __cmain
+
+
+ ldr r0, sf_boot ;@ dummy to keep boot loader area
+loop_here:
+ b loop_here
+
+sf_boot:
+ DC32 0x00000001
+
+Undefined_Handler:
+ EXTERN CUndefHandler
+ SRSDB SP!, #Mode_UND
+ PUSH {R0-R4, R12} /* Save APCS corruptible registers to UND mode stack */
+
+ MRS R0, SPSR
+ TST R0, #T_Bit /* Check mode */
+ MOVEQ R1, #4 /* R1 = 4 ARM mode */
+ MOVNE R1, #2 /* R1 = 2 Thumb mode */
+ SUB R0, LR, R1
+ LDREQ R0, [R0] /* ARM mode - R0 points to offending instruction */
+ BEQ undef_cont
+
+ /* Thumb instruction */
+ /* Determine if it is a 32-bit Thumb instruction */
+ LDRH R0, [R0]
+ MOV R2, #0x1c
+ CMP R2, R0, LSR #11
+ BHS undef_cont /* 16-bit Thumb instruction */
+
+ /* 32-bit Thumb instruction. Unaligned - we need to reconstruct the offending instruction. */
+ LDRH R2, [LR]
+ ORR R0, R2, R0, LSL #16
+undef_cont:
+ MOV R2, LR /* Set LR to third argument */
+
+/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
+ MOV R3, SP /* Ensure stack is 8-byte aligned */
+ AND R12, R3, #4
+ SUB SP, SP, R12 /* Adjust stack */
+ PUSH {R12, LR} /* Store stack adjustment and dummy LR */
+
+ /* R0 Offending instruction */
+ /* R1 =2 (Thumb) or =4 (ARM) */
+ BL CUndefHandler
+
+ POP {R12, LR} /* Get stack adjustment & discard dummy LR */
+ ADD SP, SP, R12 /* Unadjust stack */
+
+ LDR LR, [SP, #24] /* Restore stacked LR and possibly adjust for retry */
+ SUB LR, LR, R0
+ LDR R0, [SP, #28] /* Restore stacked SPSR */
+ MSR SPSR_cxsf, R0
+ POP {R0-R4, R12} /* Restore stacked APCS registers */
+ ADD SP, SP, #8 /* Adjust SP for already-restored banked registers */
+ MOVS PC, LR
+
+Prefetch_Handler:
+ EXTERN CPAbtHandler
+ SUB LR, LR, #4 /* Pre-adjust LR */
+    SRSDB   SP!, #Mode_ABT          /* Save LR and SPSR to ABT mode stack */
+ PUSH {R0-R4, R12} /* Save APCS corruptible registers to ABT mode stack */
+ MRC p15, 0, R0, c5, c0, 1 /* IFSR */
+ MRC p15, 0, R1, c6, c0, 2 /* IFAR */
+
+ MOV R2, LR /* Set LR to third argument */
+
+/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
+ MOV R3, SP /* Ensure stack is 8-byte aligned */
+ AND R12, R3, #4
+ SUB SP, SP, R12 /* Adjust stack */
+ PUSH {R12, LR} /* Store stack adjustment and dummy LR */
+
+ BL CPAbtHandler
+
+ POP {R12, LR} /* Get stack adjustment & discard dummy LR */
+ ADD SP, SP, R12 /* Unadjust stack */
+
+    POP     {R0-R4, R12}            /* Restore stacked APCS registers */
+ RFEFD SP! /* Return from exception */
+
+Abort_Handler:
+ EXTERN CDAbtHandler
+ SUB LR, LR, #8 /* Pre-adjust LR */
+    SRSDB   SP!, #Mode_ABT          /* Save LR and SPSR to ABT mode stack */
+ PUSH {R0-R4, R12} /* Save APCS corruptible registers to ABT mode stack */
+ CLREX /* State of exclusive monitors unknown after taken data abort */
+ MRC p15, 0, R0, c5, c0, 0 /* DFSR */
+ MRC p15, 0, R1, c6, c0, 0 /* DFAR */
+
+ MOV R2, LR /* Set LR to third argument */
+
+/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
+ MOV R3, SP /* Ensure stack is 8-byte aligned */
+ AND R12, R3, #4
+ SUB SP, SP, R12 /* Adjust stack */
+ PUSH {R12, LR} /* Store stack adjustment and dummy LR */
+
+ BL CDAbtHandler
+
+ POP {R12, LR} /* Get stack adjustment & discard dummy LR */
+ ADD SP, SP, R12 /* Unadjust stack */
+
+ POP {R0-R4, R12} /* Restore stacked APCS registers */
+ RFEFD SP! /* Return from exception */
+
+FIQ_Handler:
+ /* An FIQ might occur between the dummy read and the real read of the GIC in IRQ_Handler,
+ * so if a real FIQ Handler is implemented, this will be needed before returning:
+ */
+ /* LDR R1, =GICI_BASE
+ LDR R0, [R1, #ICCHPIR_OFFSET] ; Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120
+ */
+ B .
+
+ EXTERN SVC_Handler /* refer RTX function */
+
+IRQ_Handler:
+ EXTERN IRQCount
+ EXTERN IRQTable
+ EXTERN IRQNestLevel
+
+ /* prologue */
+ SUB LR, LR, #4 /* Pre-adjust LR */
+    SRSDB   SP!, #Mode_SVC          /* Save LR_IRQ and SPSR_IRQ to SVC mode stack */
+ CPS #Mode_SVC /* Switch to SVC mode, to avoid a nested interrupt corrupting LR on a BL */
+ PUSH {R0-R3, R12} /* Save remaining APCS corruptible registers to SVC stack */
+
+/* AND R1, SP, #4 */ /* Ensure stack is 8-byte aligned */
+ MOV R3, SP /* Ensure stack is 8-byte aligned */
+ AND R1, R3, #4
+ SUB SP, SP, R1 /* Adjust stack */
+ PUSH {R1, LR} /* Store stack adjustment and LR_SVC to SVC stack */
+
+ LDR R0, =IRQNestLevel /* Get address of nesting counter */
+ LDR R1, [R0]
+ ADD R1, R1, #1 /* Increment nesting counter */
+ STR R1, [R0]
+
+ /* identify and acknowledge interrupt */
+ LDR R1, =GICI_BASE
+ LDR R0, [R1, #ICCHPIR_OFFSET] /* Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120 */
+ LDR R0, [R1, #ICCIAR_OFFSET] /* Read ICCIAR (GIC CPU Interface register) */
+ DSB /* Ensure that interrupt acknowledge completes before re-enabling interrupts */
+
+ /* Workaround GIC 390 errata 733075
+ * If the ID is not 0, then service the interrupt as normal.
+ * If the ID is 0 and active, then service interrupt ID 0 as normal.
+ * If the ID is 0 but not active, then the GIC CPU interface may be locked-up, so unlock it
+ * with a dummy write to ICDIPR0. This interrupt should be treated as spurious and not serviced.
+ */
+ LDR R2, =GICD_BASE
+ LDR R3, =GIC_ERRATA_CHECK_1
+ CMP R0, R3
+ BEQ unlock_cpu
+ LDR R3, =GIC_ERRATA_CHECK_2
+ CMP R0, R3
+ BEQ unlock_cpu
+ CMP R0, #0
+ BNE int_active /* If the ID is not 0, then service the interrupt */
+ LDR R3, [R2, #ICDABR0_OFFSET] /* Get the interrupt state */
+ TST R3, #1
+ BNE int_active /* If active, then service the interrupt */
+unlock_cpu:
+ LDR R3, [R2, #ICDIPR0_OFFSET] /* Not active, so unlock the CPU interface */
+ STR R3, [R2, #ICDIPR0_OFFSET] /* with a dummy write */
+ DSB /* Ensure the write completes before continuing */
+ B ret_irq /* Do not service the spurious interrupt */
+ /* End workaround */
+
+int_active:
+ LDR R2, =IRQCount /* Read number of IRQs */
+ LDR R2, [R2]
+ CMP R0, R2 /* Clean up and return if no handler */
+ BHS ret_irq /* In a single-processor system, spurious interrupt ID 1023 does not need any special handling */
+ LDR R2, =IRQTable /* Get address of handler */
+ LDR R2, [R2, R0, LSL #2]
+ CMP R2, #0 /* Clean up and return if handler address is 0 */
+ BEQ ret_irq
+ PUSH {R0,R1}
+
+ CPSIE i /* Now safe to re-enable interrupts */
+ BLX R2 /* Call handler. R0 will be IRQ number */
+ CPSID i /* Disable interrupts again */
+
+ /* write EOIR (GIC CPU Interface register) */
+ POP {R0,R1}
+ DSB /* Ensure that interrupt source is cleared before we write the EOIR */
+ret_irq:
+ /* epilogue */
+ STR R0, [R1, #ICCEOIR_OFFSET]
+
+ LDR R0, =IRQNestLevel /* Get address of nesting counter */
+ LDR R1, [R0]
+ SUB R1, R1, #1 /* Decrement nesting counter */
+ STR R1, [R0]
+
+ POP {R1, LR} /* Get stack adjustment and restore LR_SVC */
+ ADD SP, SP, R1 /* Unadjust stack */
+
+ POP {R0-R3,R12} /* Restore stacked APCS registers */
+ RFEFD SP! /* Return from exception */
+;;;
+;;; Add more initialization here
+;;;
+FPUEnable:
+ ARM
+
+ //Permit access to VFP registers by modifying CPACR
+ MRC p15,0,R1,c1,c0,2
+ ORR R1,R1,#0x00F00000
+ MCR p15,0,R1,c1,c0,2
+
+ //Enable VFP
+ VMRS R1,FPEXC
+ ORR R1,R1,#0x40000000
+ VMSR FPEXC,R1
+
+ //Initialise VFP registers to 0
+ MOV R2,#0
+ VMOV D0, R2,R2
+ VMOV D1, R2,R2
+ VMOV D2, R2,R2
+ VMOV D3, R2,R2
+ VMOV D4, R2,R2
+ VMOV D5, R2,R2
+ VMOV D6, R2,R2
+ VMOV D7, R2,R2
+ VMOV D8, R2,R2
+ VMOV D9, R2,R2
+ VMOV D10,R2,R2
+ VMOV D11,R2,R2
+ VMOV D12,R2,R2
+ VMOV D13,R2,R2
+ VMOV D14,R2,R2
+ VMOV D15,R2,R2
+
+ //Initialise FPSCR to a known state
+ VMRS R2,FPSCR
+ LDR R3,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
+ AND R2,R2,R3
+ VMSR FPSCR,R2
+
+ BX LR
+
+ END
--- a/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/mbed_sf_boot.c Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/mbed_sf_boot.c Thu Feb 18 09:45:10 2016 +0000
@@ -30,6 +30,8 @@
#pragma arm section rodata = "BOOT_LOADER"
const char boot_loader[] __attribute__((used)) =
+#elif defined (__ICCARM__)
+__root const char boot_loader[] @ 0x18000000 =
#else
const char boot_loader[] __attribute__ ((section(".boot_loader"), used)) =
--- a/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/mmu_Renesas_RZ_A1.c Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/mmu_Renesas_RZ_A1.c Thu Feb 18 09:45:10 2016 +0000
@@ -71,8 +71,12 @@
extern uint32_t Image$$RO_DATA$$Base;
extern uint32_t Image$$RW_DATA$$Base;
extern uint32_t Image$$ZI_DATA$$Base;
+#if !defined ( __ICCARM__ )
extern uint32_t Image$$TTB$$ZI$$Base;
+#endif
+
#if defined( __CC_ARM )
+#elif defined( __ICCARM__ )
#else
extern uint32_t Image$$RW_DATA_NC$$Base;
extern uint32_t Image$$ZI_DATA_NC$$Base;
@@ -88,10 +92,18 @@
extern uint32_t Image$$ZI_DATA_NC$$Limit;
#endif
+#if defined( __ICCARM__ )
+#define VECTORS_SIZE (((uint32_t)Image$$VECTORS$$Limit >> 20) - ((uint32_t)Image$$VECTORS$$Base >> 20) + 1)
+#define RO_DATA_SIZE (((uint32_t)Image$$RO_DATA$$Limit >> 20) - ((uint32_t)Image$$RO_DATA$$Base >> 20) + 1)
+#define RW_DATA_SIZE (((uint32_t)Image$$RW_DATA$$Limit >> 20) - ((uint32_t)Image$$RW_DATA$$Base >> 20) + 1)
+#define ZI_DATA_SIZE (((uint32_t)Image$$ZI_DATA$$Limit >> 20) - ((uint32_t)Image$$ZI_DATA$$Base >> 20) + 1)
+#else
#define VECTORS_SIZE (((uint32_t)&Image$$VECTORS$$Limit >> 20) - ((uint32_t)&Image$$VECTORS$$Base >> 20) + 1)
#define RO_DATA_SIZE (((uint32_t)&Image$$RO_DATA$$Limit >> 20) - ((uint32_t)&Image$$RO_DATA$$Base >> 20) + 1)
#define RW_DATA_SIZE (((uint32_t)&Image$$RW_DATA$$Limit >> 20) - ((uint32_t)&Image$$RW_DATA$$Base >> 20) + 1)
#define ZI_DATA_SIZE (((uint32_t)&Image$$ZI_DATA$$Limit >> 20) - ((uint32_t)&Image$$ZI_DATA$$Base >> 20) + 1)
+#endif
+
#if defined( __CC_ARM )
#else
#define RW_DATA_NC_SIZE (((uint32_t)&Image$$RW_DATA_NC$$Limit >> 20) - ((uint32_t)&Image$$RW_DATA_NC$$Base >> 20) + 1)
@@ -112,10 +124,37 @@
static uint32_t Page_4k_Device_RW; //Shared device, not executable, rw, domain 0
static uint32_t Page_64k_Device_RW; //Shared device, not executable, rw, domain 0
+#if defined ( __ICCARM__ )
+__no_init uint32_t Image$$TTB$$ZI$$Base @ ".retram";
+uint32_t Image$$VECTORS$$Base;
+uint32_t Image$$RO_DATA$$Base;
+uint32_t Image$$RW_DATA$$Base;
+uint32_t Image$$ZI_DATA$$Base;
+
+uint32_t Image$$VECTORS$$Limit;
+uint32_t Image$$RO_DATA$$Limit;
+uint32_t Image$$RW_DATA$$Limit;
+uint32_t Image$$ZI_DATA$$Limit;
+#endif
+
void create_translation_table(void)
{
mmu_region_attributes_Type region;
+#if defined ( __ICCARM__ )
+#pragma section=".intvec"
+#pragma section=".rodata"
+#pragma section=".rwdata"
+#pragma section=".bss"
+ Image$$VECTORS$$Base = (uint32_t) __section_begin(".intvec");
+ Image$$VECTORS$$Limit= ((uint32_t)__section_begin(".intvec")+(uint32_t)__section_size(".intvec"));
+ Image$$RO_DATA$$Base = (uint32_t) __section_begin(".rodata");
+ Image$$RO_DATA$$Limit= ((uint32_t)__section_begin(".rodata")+(uint32_t)__section_size(".rodata"));
+ Image$$RW_DATA$$Base = (uint32_t) __section_begin(".rwdata");
+ Image$$RW_DATA$$Limit= ((uint32_t)__section_begin(".rwdata")+(uint32_t)__section_size(".rwdata"));
+ Image$$ZI_DATA$$Base = (uint32_t) __section_begin(".bss");
+ Image$$ZI_DATA$$Limit= ((uint32_t)__section_begin(".bss")+(uint32_t)__section_size(".bss"));
+#endif
/*
* Generate descriptors. Refer to MBRZA1H.h to get information about attributes
*
@@ -157,13 +196,25 @@
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_PERIPH_BASE0 , 3, Sect_Device_RW);
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_PERIPH_BASE1 , 49, Sect_Device_RW);
+#if defined( __ICCARM__ )
+ //Define Image
+ __TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RO_DATA$$Base, RO_DATA_SIZE, Sect_Normal_RO);
+ __TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$VECTORS$$Base, VECTORS_SIZE, Sect_Normal_Cod);
+ __TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RW_DATA$$Base, RW_DATA_SIZE, Sect_Normal_RW);
+ __TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$ZI_DATA$$Base, ZI_DATA_SIZE, Sect_Normal_RW);
+#else
//Define Image
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RO_DATA$$Base, RO_DATA_SIZE, Sect_Normal_RO);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base, VECTORS_SIZE, Sect_Normal_Cod);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base, RW_DATA_SIZE, Sect_Normal_RW);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA$$Base, ZI_DATA_SIZE, Sect_Normal_RW);
+#endif
+
#if defined( __CC_ARM )
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_ONCHIP_SRAM_NC_BASE, 10, Sect_Normal_NC);
+#elif defined ( __ICCARM__ )
+ __TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_ONCHIP_SRAM_NC_BASE, 10, Sect_Normal_NC);
+
#else
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA_NC$$Base, RW_DATA_NC_SIZE, Sect_Normal_NC);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA_NC$$Base, ZI_DATA_NC_SIZE, Sect_Normal_NC);
--- a/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/system_MBRZA1H.c Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/cmsis/TARGET_RENESAS/TARGET_RZ_A1H/system_MBRZA1H.c Thu Feb 18 09:45:10 2016 +0000
@@ -133,6 +133,40 @@
PL310_Enable();
}
}
+#elif defined ( __ICCARM__ )
+
+void InitMemorySubsystem(void) {
+
+ /* This SVC is specific for reset where data / tlb / btac may contain undefined data, therefore before
+ * enabling the cache you must invalidate the instruction cache, the data cache, TLB, and BTAC.
+ * You are not required to invalidate the main TLB, even though it is recommended for safety
+ * reasons. This ensures compatibility with future revisions of the processor. */
+
+ unsigned int l2_id;
+
+ /* Invalidate undefined data */
+ __ca9u_inv_tlb_all();
+ __v7_inv_icache_all();
+ __v7_inv_dcache_all();
+ __v7_inv_btac();
+
+ /* Don't use this function during runtime since caches may contain valid data. For a correct cache maintenance you may need to execute a clean and
+ * invalidate in order to flush the valid data to the next level cache.
+ */
+ __enable_mmu();
+
+ /* After MMU is enabled and data has been invalidated, enable caches and BTAC */
+ __enable_caches();
+ __enable_btac();
+
+ /* If present, you may also need to Invalidate and Enable L2 cache here */
+ l2_id = PL310_GetID();
+ if (l2_id)
+ {
+ PL310_InvAllByWay();
+ PL310_Enable();
+ }
+}
#else
#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/targets/cmsis/TOOLCHAIN_IAR/TARGET_CORTEX_A/cache.s Thu Feb 18 09:45:10 2016 +0000
@@ -0,0 +1,97 @@
+/* Copyright (c) 2009 - 2012 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+ * Functions
+ *---------------------------------------------------------------------------*/
+ SECTION `.text`:CODE:NOROOT(2)
+ arm
+ PUBLIC __v7_all_cache
+/*
+ * __STATIC_ASM void __v7_all_cache(uint32_t op) {
+ */
+
+__v7_all_cache:
+
+
+ PUSH {R4-R11}
+
+ MRC p15, 1, R6, c0, c0, 1 /* Read CLIDR */
+ ANDS R3, R6, #0x07000000 /* Extract coherency level */
+ MOV R3, R3, LSR #23 /* Total cache levels << 1 */
+ BEQ Finished /* If 0, no need to clean */
+
+ MOV R10, #0 /* R10 holds current cache level << 1 */
+Loop1: ADD R2, R10, R10, LSR #1 /* R2 holds cache "Set" position */
+ MOV R1, R6, LSR R2 /* Bottom 3 bits are the Cache-type for this level */
+ AND R1, R1, #7 /* Isolate those lower 3 bits */
+ CMP R1, #2
+ BLT Skip /* No cache or only instruction cache at this level */
+
+ MCR p15, 2, R10, c0, c0, 0 /* Write the Cache Size selection register */
+ ISB /* ISB to sync the change to the CacheSizeID reg */
+ MRC p15, 1, R1, c0, c0, 0 /* Reads current Cache Size ID register */
+ AND R2, R1, #7 /* Extract the line length field */
+ ADD R2, R2, #4 /* Add 4 for the line length offset (log2 16 bytes) */
+ LDR R4, =0x3FF
+ ANDS R4, R4, R1, LSR #3 /* R4 is the max number on the way size (right aligned) */
+ CLZ R5, R4 /* R5 is the bit position of the way size increment */
+ LDR R7, =0x7FFF
+ ANDS R7, R7, R1, LSR #13 /* R7 is the max number of the index size (right aligned) */
+
+Loop2: MOV R9, R4 /* R9 working copy of the max way size (right aligned) */
+
+Loop3: ORR R11, R10, R9, LSL R5 /* Factor in the Way number and cache number into R11 */
+ ORR R11, R11, R7, LSL R2 /* Factor in the Set number */
+ CMP R0, #0
+ BNE Dccsw
+ MCR p15, 0, R11, c7, c6, 2 /* DCISW. Invalidate by Set/Way */
+ B cont
+Dccsw: CMP R0, #1
+ BNE Dccisw
+ MCR p15, 0, R11, c7, c10, 2 /* DCCSW. Clean by Set/Way */
+ B cont
+Dccisw: MCR p15, 0, R11, c7, c14, 2 /* DCCISW, Clean and Invalidate by Set/Way */
+cont: SUBS R9, R9, #1 /* Decrement the Way number */
+ BGE Loop3
+ SUBS R7, R7, #1 /* Decrement the Set number */
+ BGE Loop2
+Skip: ADD R10, R10, #2 /* increment the cache number */
+ CMP R3, R10
+ BGT Loop1
+
+Finished:
+ DSB
+ POP {R4-R11}
+ BX lr
+
+
+ END
+/*----------------------------------------------------------------------------
+ * end of file
+ *---------------------------------------------------------------------------*/
+
--- a/targets/cmsis/core_ca9.h Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/cmsis/core_ca9.h Thu Feb 18 09:45:10 2016 +0000
@@ -89,6 +89,11 @@
#define __STATIC_INLINE static inline
#define __STATIC_ASM static __asm
+#include <stdint.h>
+inline uint32_t __get_PSR(void) {
+ __ASM("mrs r0, cpsr");
+}
+
#elif defined ( __TMS470__ )
#define __ASM __asm /*!< asm keyword for TI CCS Compiler */
#define __STATIC_INLINE static inline
--- a/targets/cmsis/core_caFunc.h Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/cmsis/core_caFunc.h Thu Feb 18 09:45:10 2016 +0000
@@ -570,7 +570,248 @@
#elif (defined (__ICCARM__)) /*---------------- ICC Compiler ---------------------*/
-#error IAR Compiler support not implemented for Cortex-A
+#define __inline inline
+
+inline static uint32_t __disable_irq_iar() {
+ int irq_dis = __get_CPSR() & 0x80; // bit 7 of CPSR: the I (IRQ disable) flag
+ __disable_irq();
+ return irq_dis;
+}
+
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_MON 0x16
+#define MODE_ABT 0x17
+#define MODE_HYP 0x1A
+#define MODE_UND 0x1B
+#define MODE_SYS 0x1F
+
+/** \brief Set Process Stack Pointer
+
+ This function assigns the given value to the USR/SYS Stack Pointer (PSP).
+
+ \param [in] topOfProcStack USR/SYS Stack Pointer value to set
+ */
+// from rt_CMSIS.c
+__arm static inline void __set_PSP(uint32_t topOfProcStack) {
+__asm(
+ " ARM\n"
+// " PRESERVE8\n"
+
+ " BIC R0, R0, #7 ;ensure stack is 8-byte aligned \n"
+ " MRS R1, CPSR \n"
+ " CPS #0x1F ;no effect in USR mode \n" // MODE_SYS
+ " MOV SP, R0 \n"
+ " MSR CPSR_c, R1 ;no effect in USR mode \n"
+ " ISB \n"
+ " BX LR \n");
+}
+
+/** \brief Set User Mode
+
+ This function changes the processor state to User Mode
+ */
+// from rt_CMSIS.c
+__arm static inline void __set_CPS_USR(void) {
+__asm(
+ " ARM \n"
+
+ " CPS #0x10 \n" // MODE_USR
+ " BX LR\n");
+}
+
+/** \brief Set TTBR0
+
+ This function assigns the given value to the Translation Table Base Register 0.
+
+ \param [in] ttbr0 Translation Table Base Register 0 value to set
+ */
+// from mmu_Renesas_RZ_A1.c
+__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
+ __MCR(15, 0, ttbr0, 2, 0, 0); // reg to cp15
+ __ISB();
+}
+
+/** \brief Set DACR
+
+ This function assigns the given value to the Domain Access Control Register.
+
+ \param [in] dacr Domain Access Control Register value to set
+ */
+// from mmu_Renesas_RZ_A1.c
+__STATIC_INLINE void __set_DACR(uint32_t dacr) {
+ __MCR(15, 0, dacr, 3, 0, 0); // reg to cp15
+ __ISB();
+}
+
+
+/******************************** Cache and BTAC enable ****************************************************/
+/** \brief Set SCTLR
+
+ This function assigns the given value to the System Control Register.
+
+ \param [in] sctlr System Control Register value to set
+ */
+// from __enable_mmu()
+__STATIC_INLINE void __set_SCTLR(uint32_t sctlr) {
+ __MCR(15, 0, sctlr, 1, 0, 0); // reg to cp15
+}
+
+/** \brief Get SCTLR
+
+ This function returns the value of the System Control Register.
+
+ \return System Control Register value
+ */
+// from __enable_mmu()
+__STATIC_INLINE uint32_t __get_SCTLR() {
+ uint32_t __regSCTLR = __MRC(15, 0, 1, 0, 0);
+ return __regSCTLR;
+}
+
+/** \brief Enable Caches
+
+ Enable Caches
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __enable_caches(void) {
+ __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
+}
+
+/** \brief Enable BTAC
+
+ Enable BTAC
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __enable_btac(void) {
+ __set_SCTLR( __get_SCTLR() | (1 << 11));
+ __ISB();
+}
+
+/** \brief Enable MMU
+
+ Enable MMU
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __enable_mmu(void) {
+ // Set M bit 0 to enable the MMU
+ // Set AFE bit to enable simplified access permissions model
+ // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
+ __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
+ __ISB();
+}
+
+/******************************** TLB maintenance operations ************************************************/
+/** \brief Invalidate the whole tlb
+
+ TLBIALL. Invalidate the whole tlb
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
+ uint32_t val = 0;
+ __MCR(15, 0, val, 8, 7, 0); // reg to cp15
+ __MCR(15, 0, val, 8, 6, 0); // reg to cp15
+ __MCR(15, 0, val, 8, 5, 0); // reg to cp15
+ __DSB();
+ __ISB();
+}
+
+/******************************** BTB maintenance operations ************************************************/
+/** \brief Invalidate entire branch predictor array
+
+ BPIALL. Branch Predictor Invalidate All.
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __v7_inv_btac(void) {
+ uint32_t val = 0;
+ __MCR(15, 0, val, 7, 5, 6); // reg to cp15
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new state
+}
+
+
+/******************************** L1 cache operations ******************************************************/
+
+/** \brief Invalidate the whole I$
+
+ ICIALLU. Instruction Cache Invalidate All to PoU
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __v7_inv_icache_all(void) {
+ uint32_t val = 0;
+ __MCR(15, 0, val, 7, 5, 0); // reg to cp15
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new I cache state
+}
+
+// from __v7_inv_dcache_all()
+__arm static inline void __v7_all_cache(uint32_t op) {
+__asm(
+ " ARM \n"
+
+ " PUSH {R4-R11} \n"
+
+ " MRC p15, 1, R6, c0, c0, 1\n" // Read CLIDR
+ " ANDS R3, R6, #0x07000000\n" // Extract coherency level
+ " MOV R3, R3, LSR #23\n" // Total cache levels << 1
+ " BEQ Finished\n" // If 0, no need to clean
+
+ " MOV R10, #0\n" // R10 holds current cache level << 1
+ "Loop1: ADD R2, R10, R10, LSR #1\n" // R2 holds cache "Set" position
+ " MOV R1, R6, LSR R2 \n" // Bottom 3 bits are the Cache-type for this level
+ " AND R1, R1, #7 \n" // Isolate those lower 3 bits
+ " CMP R1, #2 \n"
+ " BLT Skip \n" // No cache or only instruction cache at this level
+
+ " MCR p15, 2, R10, c0, c0, 0 \n" // Write the Cache Size selection register
+ " ISB \n" // ISB to sync the change to the CacheSizeID reg
+ " MRC p15, 1, R1, c0, c0, 0 \n" // Reads current Cache Size ID register
+ " AND R2, R1, #7 \n" // Extract the line length field
+ " ADD R2, R2, #4 \n" // Add 4 for the line length offset (log2 16 bytes)
+ " movw R4, #0x3FF \n"
+ " ANDS R4, R4, R1, LSR #3 \n" // R4 is the max number of the way size (right aligned)
+ " CLZ R5, R4 \n" // R5 is the bit position of the way size increment
+ " movw R7, #0x7FFF \n"
+ " ANDS R7, R7, R1, LSR #13 \n" // R7 is the max number of the index size (right aligned)
+
+ "Loop2: MOV R9, R4 \n" // R9 working copy of the max way size (right aligned)
+
+ "Loop3: ORR R11, R10, R9, LSL R5 \n" // Factor in the Way number and cache number into R11
+ " ORR R11, R11, R7, LSL R2 \n" // Factor in the Set number
+ " CMP R0, #0 \n"
+ " BNE Dccsw \n"
+ " MCR p15, 0, R11, c7, c6, 2 \n" // DCISW. Invalidate by Set/Way
+ " B cont \n"
+ "Dccsw: CMP R0, #1 \n"
+ " BNE Dccisw \n"
+ " MCR p15, 0, R11, c7, c10, 2 \n" // DCCSW. Clean by Set/Way
+ " B cont \n"
+ "Dccisw: MCR p15, 0, R11, c7, c14, 2 \n" // DCCISW, Clean and Invalidate by Set/Way
+ "cont: SUBS R9, R9, #1 \n" // Decrement the Way number
+ " BGE Loop3 \n"
+ " SUBS R7, R7, #1 \n" // Decrement the Set number
+ " BGE Loop2 \n"
+ "Skip: ADD R10, R10, #2 \n" // increment the cache number
+ " CMP R3, R10 \n"
+ " BGT Loop1 \n"
+
+ "Finished: \n"
+ " DSB \n"
+ " POP {R4-R11} \n"
+ " BX lr \n" );
+}
+
+/** \brief Invalidate the whole D$
+
+ DCISW. Invalidate by Set/Way
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __v7_inv_dcache_all(void) {
+ __v7_all_cache(0);
+}
+#include "core_ca_mmu.h"
#elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
--- a/targets/hal/TARGET_RENESAS/TARGET_RZ_A1H/ethernet_api.c Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/hal/TARGET_RENESAS/TARGET_RZ_A1H/ethernet_api.c Thu Feb 18 09:45:10 2016 +0000
@@ -60,7 +60,7 @@
#define PHY_READ (2)
#define MDC_WAIT (6) /* 400ns/4 */
#define BASIC_STS_MSK_LINK (0x0004) /* Link Status */
-#define BASIC_STS_MSK_AUTO_CMP (0x0010) /* Auto-Negotiate Complete */
+#define BASIC_STS_MSK_AUTO_CMP (0x0020) /* Auto-Negotiate Complete */
#define M_PHY_ID (0xFFFFFFF0)
#define PHY_ID_LAN8710A (0x0007C0F0)
/* ETHERPIR0 */
@@ -106,11 +106,20 @@
/* memory */
/* The whole transmit/receive descriptors (must be allocated in 16-byte boundaries) */
/* Transmit/receive buffers (must be allocated in 16-byte boundaries) */
-static uint8_t ehernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
+#if defined(__ICCARM__)
+#pragma data_alignment=16
+static uint8_t ethernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
+ (sizeof(edmac_recv_desc_t) * NUM_OF_RX_DESCRIPTOR) +
+ (NUM_OF_TX_DESCRIPTOR * SIZE_OF_BUFFER) +
+ (NUM_OF_RX_DESCRIPTOR * SIZE_OF_BUFFER)] //16 bytes aligned!
+ @ ".mirrorram";
+#else
+static uint8_t ethernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
(sizeof(edmac_recv_desc_t) * NUM_OF_RX_DESCRIPTOR) +
(NUM_OF_TX_DESCRIPTOR * SIZE_OF_BUFFER) +
(NUM_OF_RX_DESCRIPTOR * SIZE_OF_BUFFER)]
__attribute((section("NC_BSS"),aligned(16))); //16 bytes aligned!
+#endif
static int32_t rx_read_offset; /* read offset */
static int32_t tx_wite_offset; /* write offset */
static uint32_t send_top_index;
@@ -208,7 +217,7 @@
if (p_ethcfg->ether_mac != NULL) {
(void)memcpy(mac_addr, p_ethcfg->ether_mac, sizeof(mac_addr));
} else {
- ethernet_address(mac_addr); /* Get MAC Address */
+ ethernet_address(mac_addr); /* Get MAC Address */
}
return 0;
@@ -401,7 +410,7 @@
void ethernet_address(char *mac) {
if (mac != NULL) {
- mbed_mac_address(mac); /* Get MAC Address */
+ mbed_mac_address(mac); /* Get MAC Address */
}
}
@@ -427,8 +436,8 @@
if ((speed < 0) || (speed > 1)) {
data = 0x1000; /* Auto-Negotiation Enable */
phy_reg_write(BASIC_MODE_CONTROL_REG, data);
- data = phy_reg_read(BASIC_MODE_STATUS_REG);
for (i = 0; i < 1000; i++) {
+ data = phy_reg_read(BASIC_MODE_STATUS_REG);
if (((uint32_t)data & BASIC_STS_MSK_AUTO_CMP) != 0) {
break;
}
@@ -486,8 +495,8 @@
int32_t i;
uint8_t *p_memory_top;
- (void)memset((void *)ehernet_nc_memory, 0, sizeof(ehernet_nc_memory));
- p_memory_top = ehernet_nc_memory;
+ (void)memset((void *)ethernet_nc_memory, 0, sizeof(ethernet_nc_memory));
+ p_memory_top = ethernet_nc_memory;
/* Descriptor area configuration */
p_eth_desc_dsend = (edmac_send_desc_t *)p_memory_top;
--- a/targets/hal/TARGET_RENESAS/TARGET_RZ_A1H/serial_api.c Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/hal/TARGET_RENESAS/TARGET_RZ_A1H/serial_api.c Thu Feb 18 09:45:10 2016 +0000
@@ -512,7 +512,11 @@
int data;
int was_masked;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
if (obj->uart->SCFSR & 0x93) {
err_read = obj->uart->SCFSR;
obj->uart->SCFSR = (err_read & ~0x93);
@@ -529,7 +533,11 @@
while (!serial_readable(obj));
data = obj->uart->SCFRDR & 0xff;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
err_read = obj->uart->SCFSR;
obj->uart->SCFSR = (err_read & 0xfffD); // Clear RDF
if (!was_masked) {
@@ -546,14 +554,22 @@
uint16_t dummy_read;
int was_masked;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
obj->uart->SCSCR |= 0x0080; // Set TIE
if (!was_masked) {
__enable_irq();
}
while (!serial_writable(obj));
obj->uart->SCFTDR = c;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
dummy_read = obj->uart->SCFSR;
obj->uart->SCFSR = (dummy_read & 0xff9f); // Clear TEND/TDFE
if (!was_masked) {
@@ -572,7 +588,11 @@
void serial_clear(serial_t *obj) {
int was_masked;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
obj->uart->SCFCR |= 0x06; // TFRST = 1, RFRST = 1
obj->uart->SCFCR &= ~0x06; // TFRST = 0, RFRST = 0
@@ -589,7 +609,11 @@
void serial_break_set(serial_t *obj) {
int was_masked;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
// TxD Output(L)
obj->uart->SCSPTR &= ~0x0001u; // SPB2DT = 0
obj->uart->SCSCR &= ~0x0020u; // TE = 0 (Output disable)
@@ -600,7 +624,11 @@
void serial_break_clear(serial_t *obj) {
int was_masked;
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
obj->uart->SCSCR |= 0x0020u; // TE = 1 (Output enable)
obj->uart->SCSPTR |= 0x0001u; // SPB2DT = 1
if (!was_masked) {
@@ -615,7 +643,11 @@
serial_flow_irq_set(obj, 0);
if (type == FlowControlRTSCTS) {
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
obj->uart->SCFCR = 0x0008u; // CTS/RTS enable
if (!was_masked) {
__enable_irq();
@@ -623,7 +655,11 @@
pinmap_pinout(rxflow, PinMap_UART_RTS);
pinmap_pinout(txflow, PinMap_UART_CTS);
} else {
+#if defined ( __ICCARM__ )
+ was_masked = __disable_irq_iar();
+#else
was_masked = __disable_irq();
+#endif /* __ICCARM__ */
obj->uart->SCFCR = 0x0000u; // CTS/RTS disable
if (!was_masked) {
__enable_irq();
--- a/targets/hal/TARGET_RENESAS/TARGET_RZ_A1H/us_ticker.c Tue Feb 16 16:00:10 2016 +0000
+++ b/targets/hal/TARGET_RENESAS/TARGET_RZ_A1H/us_ticker.c Thu Feb 18 09:45:10 2016 +0000
@@ -85,7 +85,11 @@
uint64_t us_val64;
int check_irq_masked;
+#if defined ( __ICCARM__)
+ check_irq_masked = __disable_irq_iar();
+#else
check_irq_masked = __disable_irq();
+#endif /* __ICCARM__ */
cnt_val64 = ticker_read_counter64();
us_val64 = (cnt_val64 / count_clock);
