From 86429dbf1eca8ee0e08176997f508647c3abf6bd Mon Sep 17 00:00:00 2001
From: Chris Chua
Date: Sun, 19 Mar 2023 06:22:08 +1100
Subject: Using ARM Unified Assembler Language

Change-Id: Iae32a8ba8eff6087330e458fafc912a12fee4509
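
In pre-UAL "divided" syntax the condition code is written before a
load/store size suffix (ldrneb, strneh) and before a block-transfer
addressing-mode suffix (stmgedb, stmneia); UAL moves the condition to the
end of the mnemonic, and GNU as expects the unified spellings once the
".syntax unified" directive is in effect. A minimal stand-alone sketch of
the renames applied throughout this patch (the label and register choices
are illustrative only, not taken from the code):

        .syntax unified                 @ enable UAL parsing in GNU as
        .arm                            @ ARM (not Thumb) state
ual_example:
        ldrbne  r3, [r1], #1            @ was: ldrneb  r3, [r1], #1
        strbgt  r1, [r0, #-1]!          @ was: strgtb  r1, [r0, #-1]!
        strhne  r1, [r0], #2            @ was: strneh  r1, [r0], #2
        stmdbge r0!, {r1, r3, ip, lr}   @ was: stmgedb r0!, {r1, r3, ip, lr}
        stmiane r0!, {r1, r3}           @ was: stmneia r0!, {r1, r3}
        bx      lr

The two spellings assemble to identical encodings, so the conversion is
behavior-neutral.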
---
 firmware/asm/arm/corelock.c        |  3 ++-
 firmware/asm/arm/lcd-as-memframe.S |  4 ++--
 firmware/asm/arm/memcpy.S          | 20 ++++++++++----------
 firmware/asm/arm/memmove.S         | 20 ++++++++++----------
 firmware/asm/arm/memset.S          | 26 +++++++++++++-------------
 firmware/asm/arm/memset16.S        | 20 ++++++++++----------
 firmware/asm/arm/thread.c          |  5 +++--
 7 files changed, 50 insertions(+), 48 deletions(-)

(limited to 'firmware/asm')

diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c
index b36a40b45b..07ec77a60e 100644
--- a/firmware/asm/arm/corelock.c
+++ b/firmware/asm/arm/corelock.c
@@ -61,6 +61,7 @@ int corelock_try_lock(struct corelock *cl)
 
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
+        ".syntax unified                   \n"
         "mov    r1, %[id]                  \n" /* r1 = PROCESSOR_ID */
         "ldrb   r1, [r1]                   \n"
         "strb   r1, [%[cl], r1, lsr #7]    \n" /* cl->myl[core] = core */
@@ -71,7 +72,7 @@ int corelock_try_lock(struct corelock *cl)
         "bne    1f                         \n" /* yes? lock acquired */
         "ldrb   %[rv], [%[cl], #2]         \n" /* || cl->turn == core? */
         "ands   %[rv], %[rv], r1           \n"
-        "streqb %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
+        "strbeq %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
         "1:                                \n" /* Done */
         : [rv] "=r"(rval)
         : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)

diff --git a/firmware/asm/arm/lcd-as-memframe.S b/firmware/asm/arm/lcd-as-memframe.S
index 52ab0447c2..d42b2a920d 100644
--- a/firmware/asm/arm/lcd-as-memframe.S
+++ b/firmware/asm/arm/lcd-as-memframe.S
@@ -91,9 +91,9 @@ lcd_copy_buffer_rect:
         stmia   r0!, { r6-r12, r14 }    @
         bgt     30b                     @ octword loop @
 40:                                     @ finish line @
-        ldreqh  r6, [r1], #2            @ finish last halfword if eq ...
+        ldrheq  r6, [r1], #2            @ finish last halfword if eq ...
         add     r1, r1, r4, lsl #1      @
-        streqh  r6, [r0], #2            @ ...
+        strheq  r6, [r0], #2            @ ...
         add     r0, r0, r4, lsl #1      @
         subs    r3, r3, #1              @ next line
         bgt     10b                     @ copy line @

diff --git a/firmware/asm/arm/memcpy.S b/firmware/asm/arm/memcpy.S
index 83d43293e6..86fc6b7930 100644
--- a/firmware/asm/arm/memcpy.S
+++ b/firmware/asm/arm/memcpy.S
@@ -99,22 +99,22 @@ memcpy:
 
 7:      ldmfd   sp!, {r5 - r8}
 
 8:      movs    r2, r2, lsl #31
-        ldrneb  r3, [r1], #1
-        ldrcsb  r4, [r1], #1
-        ldrcsb  ip, [r1]
-        strneb  r3, [r0], #1
-        strcsb  r4, [r0], #1
-        strcsb  ip, [r0]
+        ldrbne  r3, [r1], #1
+        ldrbcs  r4, [r1], #1
+        ldrbcs  ip, [r1]
+        strbne  r3, [r0], #1
+        strbcs  r4, [r0], #1
+        strbcs  ip, [r0]
         ldmpc   regs="r0, r4"
 
 9:      rsb     ip, ip, #4
         cmp     ip, #2
-        ldrgtb  r3, [r1], #1
-        ldrgeb  r4, [r1], #1
+        ldrbgt  r3, [r1], #1
+        ldrbge  r4, [r1], #1
         ldrb    lr, [r1], #1
-        strgtb  r3, [r0], #1
-        strgeb  r4, [r0], #1
+        strbgt  r3, [r0], #1
+        strbge  r4, [r0], #1
         subs    r2, r2, ip
         strb    lr, [r0], #1
         blt     8b

diff --git a/firmware/asm/arm/memmove.S b/firmware/asm/arm/memmove.S
index d8cab048be..e5c9b42928 100644
--- a/firmware/asm/arm/memmove.S
+++ b/firmware/asm/arm/memmove.S
@@ -106,20 +106,20 @@ memmove:
 
 7:      ldmfd   sp!, {r5 - r8}
 
 8:      movs    r2, r2, lsl #31
-        ldrneb  r3, [r1, #-1]!
-        ldrcsb  r4, [r1, #-1]!
-        ldrcsb  ip, [r1, #-1]
-        strneb  r3, [r0, #-1]!
-        strcsb  r4, [r0, #-1]!
-        strcsb  ip, [r0, #-1]
+        ldrbne  r3, [r1, #-1]!
+        ldrbcs  r4, [r1, #-1]!
+        ldrbcs  ip, [r1, #-1]
+        strbne  r3, [r0, #-1]!
+        strbcs  r4, [r0, #-1]!
+        strbcs  ip, [r0, #-1]
         ldmpc   regs="r0, r4"
 
 9:      cmp     ip, #2
-        ldrgtb  r3, [r1, #-1]!
-        ldrgeb  r4, [r1, #-1]!
+        ldrbgt  r3, [r1, #-1]!
+        ldrbge  r4, [r1, #-1]!
         ldrb    lr, [r1, #-1]!
-        strgtb  r3, [r0, #-1]!
-        strgeb  r4, [r0, #-1]!
+        strbgt  r3, [r0, #-1]!
+        strbge  r4, [r0, #-1]!
         subs    r2, r2, ip
         strb    lr, [r0, #-1]!
         blt     8b

diff --git a/firmware/asm/arm/memset.S b/firmware/asm/arm/memset.S
index 64cd95cc9e..d727f2a5ec 100644
--- a/firmware/asm/arm/memset.S
+++ b/firmware/asm/arm/memset.S
@@ -34,8 +34,8 @@
 1:      cmp     r2, #4          @ 1 do we have enough
         blt     5f              @ 1 bytes to align with?
         cmp     r3, #2          @ 1
-        strgtb  r1, [r0, #-1]!  @ 1
-        strgeb  r1, [r0, #-1]!  @ 1
+        strbgt  r1, [r0, #-1]!  @ 1
+        strbge  r1, [r0, #-1]!  @ 1
         strb    r1, [r0, #-1]!  @ 1
         sub     r2, r2, r3      @ 1 r2 = r2 - r3
         b       2f
@@ -65,24 +65,24 @@ memset:
         mov     lr, r1
 
 3:      subs    r2, r2, #64
-        stmgedb r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
-        stmgedb r0!, {r1, r3, ip, lr}
-        stmgedb r0!, {r1, r3, ip, lr}
-        stmgedb r0!, {r1, r3, ip, lr}
+        stmdbge r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
+        stmdbge r0!, {r1, r3, ip, lr}
+        stmdbge r0!, {r1, r3, ip, lr}
+        stmdbge r0!, {r1, r3, ip, lr}
         bgt     3b
         ldrpc   cond=eq                 @ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
         tst     r2, #32
-        stmnedb r0!, {r1, r3, ip, lr}
-        stmnedb r0!, {r1, r3, ip, lr}
+        stmdbne r0!, {r1, r3, ip, lr}
+        stmdbne r0!, {r1, r3, ip, lr}
         tst     r2, #16
-        stmnedb r0!, {r1, r3, ip, lr}
+        stmdbne r0!, {r1, r3, ip, lr}
         ldr     lr, [sp], #4
 
 5:      tst     r2, #8
-        stmnedb r0!, {r1, r3}
+        stmdbne r0!, {r1, r3}
         tst     r2, #4
         strne   r1, [r0, #-4]!
 /*
  * When we get here, we've got less than 4 bytes to zero.  We
  * may have an unaligned pointer as well.
  */
 6:      tst     r2, #2
-        strneb  r1, [r0, #-1]!
-        strneb  r1, [r0, #-1]!
+        strbne  r1, [r0, #-1]!
+        strbne  r1, [r0, #-1]!
         tst     r2, #1
-        strneb  r1, [r0, #-1]!
+        strbne  r1, [r0, #-1]!
         bx      lr
 .end:
         .size   memset,.end-memset

diff --git a/firmware/asm/arm/memset16.S b/firmware/asm/arm/memset16.S
index 5c787b1bed..226eac39e1 100644
--- a/firmware/asm/arm/memset16.S
+++ b/firmware/asm/arm/memset16.S
@@ -35,7 +35,7 @@ memset16:
         tst     r0, #2                  @ unaligned?
         cmpne   r2, #0
-        strneh  r1, [r0], #2            @ store one halfword to align
+        strhne  r1, [r0], #2            @ store one halfword to align
         subne   r2, r2, #1
 
 /*
@@ -54,29 +54,29 @@ memset16:
         mov     lr, r1
 
 2:      subs    r2, r2, #32
-        stmgeia r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
-        stmgeia r0!, {r1, r3, ip, lr}
-        stmgeia r0!, {r1, r3, ip, lr}
-        stmgeia r0!, {r1, r3, ip, lr}
+        stmiage r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
+        stmiage r0!, {r1, r3, ip, lr}
+        stmiage r0!, {r1, r3, ip, lr}
+        stmiage r0!, {r1, r3, ip, lr}
         bgt     2b
         ldrpc   cond=eq                 @ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
         tst     r2, #16
-        stmneia r0!, {r1, r3, ip, lr}
-        stmneia r0!, {r1, r3, ip, lr}
+        stmiane r0!, {r1, r3, ip, lr}
+        stmiane r0!, {r1, r3, ip, lr}
         tst     r2, #8
-        stmneia r0!, {r1, r3, ip, lr}
+        stmiane r0!, {r1, r3, ip, lr}
         ldr     lr, [sp], #4
 
 4:      tst     r2, #4
-        stmneia r0!, {r1, r3}
+        stmiane r0!, {r1, r3}
         tst     r2, #2
         strne   r1, [r0], #4
         tst     r2, #1
-        strneh  r1, [r0], #2
+        strhne  r1, [r0], #2
         bx      lr
 .end:
         .size   memset16,.end-memset16

diff --git a/firmware/asm/arm/thread.c b/firmware/asm/arm/thread.c
index cf685526e3..bd9f950616 100644
--- a/firmware/asm/arm/thread.c
+++ b/firmware/asm/arm/thread.c
@@ -73,15 +73,16 @@ static inline void store_context(void* addr)
 static inline void load_context(const void* addr)
 {
     asm volatile(
+        ".syntax unified           \n"
         "ldr     r0, [%0, #40]     \n" /* Load start pointer */
         "cmp     r0, #0            \n" /* Check for NULL */
 
         /* If not already running, jump to start */
 #if ARM_ARCH == 4 && defined(USE_THUMB)
-        "ldmneia %0, { r0, r12 }   \n"
+        "ldmiane %0, { r0, r12 }   \n"
         "bxne    r12               \n"
 #else
-        "ldmneia %0, { r0, pc }    \n"
+        "ldmiane %0, { r0, pc }    \n"
 #endif
 
         "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
-- 
cgit v1.2.3
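
As a quick sanity check that the new spellings are valid UAL, a scratch
file along these lines should assemble cleanly with a GNU ARM assembler,
e.g. "arm-none-eabi-as -march=armv5te ual-check.s" (the toolchain prefix,
-march value, and file name are assumptions for illustration, not part of
the commit):

        .syntax unified                 @ same directive the patch adds
        .arm
ual_check:
        strbeq  r2, [r3, r1, lsr #7]    @ byte store, register offset (corelock.c form)
        ldrheq  r6, [r1], #2            @ halfword load, post-indexed (lcd-as-memframe.S form)
        ldrbcs  r4, [r1], #1            @ byte load, post-indexed (memcpy.S form)
        stmdbne r0!, {r1, r3, ip, lr}   @ block store, decrement-before (memset.S form)
        bx      lr

Without the ".syntax unified" directive, GNU as parses ARM code in the old
divided syntax, where the same instructions are spelled streqb, ldreqh,
ldrcsb and stmnedb.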