author    Solomon Peachy <pizza@shaftnet.org>    2020-06-29 19:23:03 -0400
committer Solomon Peachy <pizza@shaftnet.org>    2020-07-03 21:36:41 +0000
commit    905e19905b8601b672ed9a9b7121dfdb2444602c
tree      851998a51631946a860ed678c518f32ec92affb4
parent    9cf2492407af6fae5928685205e7fc17032ac253
ARM: Rejigger the asm corelock functions
This appears to solve _some_ of the crashes experienced when using gcc494
on the multicore PP targets (eg most older ipods). With this change, the
asm and plain-C versions behave identically.

corelock_lock(), corelock_unlock(), and corelock_try_lock() were declared
with the 'naked' attribute, but naked functions are only allowed to contain
'Basic Asm'. These used extended asm without declaring clobbered registers,
assumed the function arguments were still sitting in specific registers,
and returned directly to the caller from inside the asm.

The GCC documentation is explicit about this:

  "While using extended asm or a mixture of basic asm and C code may
   appear to work, they cannot be depended upon to work reliably and
   are not supported."

Change-Id: I79a9c4895584f9af365e6c2387595e9c45d89c7d
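To make the failure mode concrete, here is a minimal sketch (not Rockbox code; the function names and the stores are purely illustrative) of the pattern the message describes, and of the supported shape the fix moves to:

    /* Unsupported: extended asm inside a 'naked' function.  GCC emits no
     * prologue/epilogue, the asm simply hopes the argument is still in r0,
     * clobbers r1 without declaring it, and returns behind the compiler's
     * back with bx lr. */
    void __attribute__((naked)) set_flag_broken(unsigned char *flag)
    {
        asm volatile (
            "mov    r1, %0      \n"
            "strb   r1, [r0]    \n"  /* assumes 'flag' is still in r0 */
            "bx     lr          \n"  /* returns from inside the asm   */
            : : "i"(1)               /* operands make this extended asm */
        );
    }

    /* Supported: an ordinary function.  Named operands let the compiler
     * pick the registers, and the clobber list tells it what changes. */
    void set_flag_fixed(unsigned char *flag)
    {
        asm volatile (
            "mov    r1, %[val]  \n"
            "strb   r1, [%[f]]  \n"
            :
            : [val] "i" (1), [f] "r" (flag)
            : "r1", "memory"
        );
    }

This is the same transformation applied to all three corelock functions in the diff below: drop the attribute, name every input operand, declare the clobbers, and replace the asm-level returns with local branch targets.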
Diffstat (limited to 'firmware/asm/arm/corelock.c')
-rw-r--r--   firmware/asm/arm/corelock.c   71
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c
index 713164e49b..b36a40b45b 100644
--- a/firmware/asm/arm/corelock.c
+++ b/firmware/asm/arm/corelock.c
@@ -28,69 +28,72 @@
  * Wait for the corelock to become free and acquire it when it does.
  *---------------------------------------------------------------------------
  */
-void __attribute__((naked)) corelock_lock(struct corelock *cl)
+void corelock_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
-        "mov    r1, %0                  \n" /* r1 = PROCESSOR_ID */
+        "mov    r1, %[id]               \n" /* r1 = PROCESSOR_ID */
         "ldrb   r1, [r1]                \n"
-        "strb   r1, [r0, r1, lsr #7]    \n" /* cl->myl[core] = core */
+        "strb   r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
         "eor    r2, r1, #0xff           \n" /* r2 = othercore */
-        "strb   r2, [r0, #2]            \n" /* cl->turn = othercore */
+        "strb   r2, [%[cl], #2]         \n" /* cl->turn = othercore */
         "1:                             \n"
-        "ldrb   r3, [r0, r2, lsr #7]    \n" /* cl->myl[othercore] == 0 ? */
-        "cmp    r3, #0                  \n" /* yes? lock acquired */
-        "bxeq   lr                      \n"
-        "ldrb   r3, [r0, #2]            \n" /* || cl->turn == core ? */
-        "cmp    r3, r1                  \n"
-        "bxeq   lr                      \n" /* yes? lock acquired */
-        "b      1b                      \n" /* keep trying */
-        : : "i"(&PROCESSOR_ID)
+        "ldrb   r2, [%[cl], r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+        "cmp    r2, #0                  \n" /* yes? lock acquired */
+        "beq    2f                      \n"
+        "ldrb   r2, [%[cl], #2]         \n" /* || cl->turn == core ? */
+        "cmp    r2, r1                  \n"
+        "bne    1b                      \n" /* no? try again */
+        "2:                             \n" /* Done */
+        :
+        : [id] "i"(&PROCESSOR_ID), [cl] "r" (cl)
+        : "r1","r2","cc"
     );
-    (void)cl;
 }
 
 /*---------------------------------------------------------------------------
  * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
  *---------------------------------------------------------------------------
  */
-int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
+int corelock_try_lock(struct corelock *cl)
 {
+    int rval = 0;
+
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
-        "mov    r1, %0                  \n" /* r1 = PROCESSOR_ID */
+        "mov    r1, %[id]               \n" /* r1 = PROCESSOR_ID */
         "ldrb   r1, [r1]                \n"
-        "mov    r3, r0                  \n"
-        "strb   r1, [r0, r1, lsr #7]    \n" /* cl->myl[core] = core */
+        "strb   r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
         "eor    r2, r1, #0xff           \n" /* r2 = othercore */
-        "strb   r2, [r0, #2]            \n" /* cl->turn = othercore */
-        "ldrb   r0, [r3, r2, lsr #7]    \n" /* cl->myl[othercore] == 0 ? */
-        "eors   r0, r0, r2              \n" /* yes? lock acquired */
-        "bxne   lr                      \n"
-        "ldrb   r0, [r3, #2]            \n" /* || cl->turn == core? */
-        "ands   r0, r0, r1              \n"
-        "streqb r0, [r3, r1, lsr #7]    \n" /* if not, cl->myl[core] = 0 */
-        "bx     lr                      \n" /* return result */
-        : : "i"(&PROCESSOR_ID)
+        "strb   r2, [%[cl], #2]         \n" /* cl->turn = othercore */
+        "ldrb   %[rv], [%[cl], r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+        "eors   %[rv], %[rv], r2        \n"
+        "bne    1f                      \n" /* yes? lock acquired */
+        "ldrb   %[rv], [%[cl], #2]      \n" /* || cl->turn == core? */
+        "ands   %[rv], %[rv], r1        \n"
+        "streqb %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
+        "1:                             \n" /* Done */
+        : [rv] "=r"(rval)
+        : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
+        : "r1","r2","cc"
     );
 
-    return 0;
-    (void)cl;
+    return rval;
 }
 
 /*---------------------------------------------------------------------------
  * Release ownership of the corelock
  *---------------------------------------------------------------------------
  */
-void __attribute__((naked)) corelock_unlock(struct corelock *cl)
+void corelock_unlock(struct corelock *cl)
 {
     asm volatile (
-        "mov    r1, %0                  \n" /* r1 = PROCESSOR_ID */
+        "mov    r1, %[id]               \n" /* r1 = PROCESSOR_ID */
         "ldrb   r1, [r1]                \n"
         "mov    r2, #0                  \n" /* cl->myl[core] = 0 */
-        "strb   r2, [r0, r1, lsr #7]    \n"
-        "bx     lr                      \n"
-        : : "i"(&PROCESSOR_ID)
+        "strb   r2, [%[cl], r1, lsr #7] \n"
+        :
+        : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
+        : "r1","r2"
     );
-    (void)cl;
 }
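For reference, the algorithm all of this asm implements is Peterson's two-party mutual exclusion. A rough C rendering of corelock_lock() is sketched below; it is illustrative only (the struct layout is inferred from the asm's byte offsets, and the id parameter stands in for the PROCESSOR_ID read), not the actual plain-C version mentioned in the commit message:

    /* Sketch of what the asm does.  Core IDs are 0x55 and 0xaa, which are
     * complementary bitmasks: id ^ 0xff yields the other core's ID, and
     * id >> 7 yields 0 or 1, the index into myl[]. */
    struct corelock {
        volatile unsigned char myl[2];   /* offsets 0 and 1 */
        volatile unsigned char turn;     /* offset 2 */
    };

    void corelock_lock_c(struct corelock *cl, unsigned char id)
    {
        unsigned char other = id ^ 0xff;

        cl->myl[id >> 7] = id;      /* declare our interest          */
        cl->turn = other;           /* offer the other core the turn */

        /* Spin until the other core isn't interested, or until it is
         * our turn -- exactly the loop between labels 1: and 2:.    */
        while (cl->myl[other >> 7] != 0 && cl->turn != id)
            ;
    }

Unlocking is then just cl->myl[id >> 7] = 0, which is all corelock_unlock() does once the core ID has been loaded.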