Diffstat (limited to 'firmware/asm/arm')
-rw-r--r--  firmware/asm/arm/corelock.c  96
1 file changed, 96 insertions, 0 deletions
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c
new file mode 100644
index 0000000000..713164e49b
--- /dev/null
+++ b/firmware/asm/arm/corelock.c
@@ -0,0 +1,96 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 by Daniel Ankers
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/* Core locks using Peterson's mutual exclusion algorithm.
 * ASM optimized version of C code, see firmware/asm/corelock.c */

#include "cpu.h"

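/* For reference, a hedged C sketch of what the asm routines below implement:
 * Peterson's two-party algorithm, as in the generic firmware/asm/corelock.c.
 * The field layout (myl[] bytes at offsets 0 and 1, turn at offset 2) and
 * the ID values (PROCESSOR_ID reads 0x55 on the CPU, 0xaa on the COP) are
 * inferred from the asm below rather than quoted from that file:
 *
 *     unsigned char id    = PROCESSOR_ID;  // 0x55 or 0xaa
 *     unsigned char other = id ^ 0xff;     // the other core's ID
 *     cl->myl[id >> 7] = id;               // declare interest (nonzero)
 *     cl->turn = other;                    // give way first
 *     while (cl->myl[other >> 7] != 0 && cl->turn != id)
 *         ;                                // spin until free or it's our turn
 */
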
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}

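/* A note on the addressing trick above (added commentary, not part of the
 * original comments): with complementary IDs 0x55 and 0xaa, "lsr #7" maps
 * 0x55 -> 0 and 0xaa -> 1, so one register doubles as the byte stored in
 * myl[] and, shifted, as its index; "eor ..., #0xff" flips between the two
 * IDs (0x55 ^ 0xff == 0xaa and vice versa) without needing a branch. */
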
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n" /* r3 = cl; r0 will hold the result */
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0; /* unreachable; only satisfies the compiler in a naked function */
    (void)cl;
}

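/* Usage sketch (illustrative, not part of the original file): a nonzero
 * return means the lock was taken; on 0 the other core holds it and this
 * core's claim has already been retracted, so the caller can just move on:
 *
 *     if (corelock_try_lock(&cl)) {
 *         ... critical section ...
 *         corelock_unlock(&cl);
 *     }
 */
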
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
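
/* Blocking usage sketch (illustrative): corelock_lock() spins until the
 * lock is held, so the pair brackets a critical section shared between
 * the CPU and COP cores; the corelock is assumed zero-initialized:
 *
 *     static struct corelock cl;
 *
 *     corelock_lock(&cl);
 *     ... critical section ...
 *     corelock_unlock(&cl);
 */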