summaryrefslogtreecommitdiff
path: root/firmware/target/arm
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2009-02-11 23:56:00 +0000
committerMichael Sevakis <jethead71@rockbox.org>2009-02-11 23:56:00 +0000
commit63e709c7c83a3c0a462275d6b7c053804127e295 (patch)
treeba6b42e381886d8803b607e34567e14ef77436fe /firmware/target/arm
parent00a9685985881866dd08d9dc38ef58e93a27917f (diff)
downloadrockbox-63e709c7c83a3c0a462275d6b7c053804127e295.tar.gz
rockbox-63e709c7c83a3c0a462275d6b7c053804127e295.zip
Refine the routines in mmu-arm.c and move them to mmu-arm.S since the code is now 100% assembly.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19980 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/target/arm')
-rw-r--r--firmware/target/arm/imx31/gigabeat-s/system-target.h7
-rw-r--r--firmware/target/arm/mmu-arm.S487
-rw-r--r--firmware/target/arm/mmu-arm.c322
-rw-r--r--firmware/target/arm/mmu-arm.h20
-rw-r--r--firmware/target/arm/s3c2440/gigabeat-fx/system-target.h5
5 files changed, 504 insertions, 337 deletions
diff --git a/firmware/target/arm/imx31/gigabeat-s/system-target.h b/firmware/target/arm/imx31/gigabeat-s/system-target.h
index 921af0ec8b..26fb5172b2 100644
--- a/firmware/target/arm/imx31/gigabeat-s/system-target.h
+++ b/firmware/target/arm/imx31/gigabeat-s/system-target.h
@@ -57,13 +57,6 @@ void imx31_regclr32(volatile uint32_t *reg_p, uint32_t mask);
57 57
58#define KDEV_INIT 58#define KDEV_INIT
59 59
60#define HAVE_CPUCACHE_INVALIDATE
61#define HAVE_CPUCACHE_FLUSH
62
63/* Different internal names */
64#define cpucache_flush clean_dcache
65#define cpucache_invalidate invalidate_idcache
66
67struct ARM_REGS { 60struct ARM_REGS {
68 int r0; 61 int r0;
69 int r1; 62 int r1;
diff --git a/firmware/target/arm/mmu-arm.S b/firmware/target/arm/mmu-arm.S
new file mode 100644
index 0000000000..2223be2210
--- /dev/null
+++ b/firmware/target/arm/mmu-arm.S
@@ -0,0 +1,487 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2006,2007 by Greg White
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#include "config.h"
22#include "cpu.h"
23
24#if CONFIG_CPU == IMX31L
25/* TTB routines not used */
26
27/** Cache coherency **/
28
29/*
30 * Invalidate DCache for this range
31 * will do write back
32 * void invalidate_dcache_range(const void *base, unsigned int size)
33 */
34 .section .text, "ax", %progbits
35 .align 2
36 .global invalidate_dcache_range
37 .type invalidate_dcache_range, %function
38 @ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
39invalidate_dcache_range:
40 add r1, r0, r1 @ size -> end
41 cmp r1, r0 @ end <= start?
42 subhi r1, r1, #1 @ round it down
43 movhi r2, #0 @
44 mcrrhi p15, 0, r1, r0, c14 @ Clean and invalidate DCache range
45 mcrhi p15, 0, r2, c7, c10, 4 @ Data synchronization barrier
46 bx lr @
47 .size invalidate_dcache_range, .-invalidate_dcache_range
48
49/*
50 * clean DCache for this range
51 * forces DCache writeback for the specified range
52 * void clean_dcache_range(const void *base, unsigned int size);
53 */
54 .section .text, "ax", %progbits
55 .align 2
56 .global clean_dcache_range
57 .type clean_dcache_range, %function
58 @ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
59clean_dcache_range:
60 add r1, r0, r1 @ size -> end
61 cmp r1, r0 @ end <= start?
62 subhi r1, r1, #1 @ round it down
63 movhi r2, #0 @
64 mcrrhi p15, 0, r1, r0, c12 @ Clean DCache range
65 mcrhi p15, 0, r2, c7, c10, 4 @ Data synchronization barrier
66 bx lr @
67 .size clean_dcache_range, .-clean_dcache_range
68
69/*
70 * Dump DCache for this range
71 * will *NOT* do write back except for buffer edges not on a line boundary
72 * void dump_dcache_range(const void *base, unsigned int size);
73 */
74 .section .text, "ax", %progbits
75 .align 2
76 .global dump_dcache_range
77 .type dump_dcache_range, %function
78 @ MVA format (mcr): 31:5 = Modified virtual address, 4:0 = SBZ
79 @ MVA format (mcrr): 31:5 = Modified virtual address, 4:0 = Ignored
80 dump_dcache_range:
81 add r1, r0, r1 @ size -> end
82 cmp r1, r0 @ end <= start?
83 bxls lr @
84 tst r0, #31 @ Check first line for bits set
85 bicne r0, r0, #31 @ Clear low five bits (down)
86 mcrne p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
87 @ if not cache aligned
88 addne r0, r0, #32 @ Move to the next cache line
89 @
90 tst r1, #31 @ Check last line for bits set
91 bicne r1, r1, #31 @ Clear low five bits (down)
92 mcrne p15, 0, r1, c7, c14, 1 @ Clean and invalidate line by MVA
93 @ if not cache aligned
94 sub r1, r1, #32 @ Move to the previous cache line
95 cmp r1, r0 @ end < start now?
96 mcrrhs p15, 0, r1, r0, c6 @ Invalidate DCache range
97 mov r0, #0 @
98 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
99 bx lr @
100 .size dump_dcache_range, .-dump_dcache_range
101
102
103/*
104 * Cleans entire DCache
105 * void clean_dcache(void);
106 */
107 .section .text, "ax", %progbits
108 .align 2
109 .global clean_dcache
110 .type clean_dcache, %function
111 .global cpucache_flush @ Alias
112clean_dcache:
113cpucache_flush:
114 mov r0, #0 @
115 mcr p15, 0, r0, c7, c10, 0 @ Clean entire DCache
116 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
117 bx lr @
118 .size clean_dcache, .-clean_dcache
119
120/*
121 * Invalidate entire DCache
122 * will do writeback
123 * void invalidate_dcache(void);
124 */
125 .section .text, "ax", %progbits
126 .align 2
127 .global invalidate_dcache
128 .type invalidate_dcache, %function
129invalidate_dcache:
130 mov r0, #0 @
131 mcr p15, 0, r0, c7, c14, 0 @ Clean and invalidate entire DCache
132 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
133 bx lr @
134 .size invalidate_dcache, .-invalidate_dcache
135
136/*
137 * Invalidate entire ICache and DCache
138 * will do writeback
139 * void invalidate_idcache(void);
140 */
141 .section .text, "ax", %progbits
142 .align 2
143 .global invalidate_idcache
144 .type invalidate_idcache, %function
145 .global cpucache_invalidate @ Alias
146invalidate_idcache:
147cpucache_invalidate:
148 mov r0, #0 @
149 mcr p15, 0, r0, c7, c14, 0 @ Clean and invalidate entire DCache
150 mcr p15, 0, r0, c7, c5, 0 @ Invalidate entire ICache
151 @ Also flushes the branch target cache
152 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
153 mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer (IMB)
154 bx lr @
155 .size invalidate_idcache, .-invalidate_idcache
156
157#else /* !IMX31L */
158/** MMU setup **/
159
160/*
161 * void ttb_init(void);
162 */
163 .section .text, "ax", %progbits
164 .align 2
165 .global ttb_init
166 .type ttb_init, %function
167ttb_init:
168 ldr r0, =TTB_BASE_ADDR @
169 mvn r1, #0 @
170 mcr p15, 0, r0, c2, c0, 0 @ Set the TTB base address
171 mcr p15, 0, r1, c3, c0, 0 @ Set all domains to manager status
172 bx lr @
173 .size ttb_init, .-ttb_init
174
175/*
176 * void map_section(unsigned int pa, unsigned int va, int mb, int flags);
177 */
178 .section .text, "ax", %progbits
179 .align 2
180 .global map_section
181 .type map_section, %function
182map_section:
183 @ align to 1MB
184 @ pa &= (-1 << 20);
185 mov r0, r0, lsr #20
186 mov r0, r0, lsl #20
187
188 @ pa |= (flags | 0x412);
189 @ bit breakdown:
190 @ 10: superuser - r/w, user - no access
191 @ 4: should be "1"
192 @ 3,2: Cache flags (flags (r3))
193 @ 1: Section signature
194 orr r0, r0, r3
195 orr r0, r0, #0x410
196 orr r0, r0, #0x2
197
198 @ unsigned int* ttbPtr = TTB_BASE + (va >> 20);
199 @ sections are 1MB size
200 mov r1, r1, lsr #20
201 ldr r3, =TTB_BASE_ADDR
202 add r1, r3, r1, lsl #0x2
203
204 @ Add MB to pa, flags are already present in pa, but addition
 205 @ should not affect them
206 @
207 @ for( ; mb>0; mb--, pa += (1 << 20))
208 @ {
209 @ *(ttbPtr++) = pa;
210 @ }
211 cmp r2, #0
212 bxle lr
213 mov r3, #0x0
2141: @ loop
215 str r0, [r1], #4
216 add r0, r0, #0x100000
217 add r3, r3, #0x1
218 cmp r2, r3
219 bne 1b @ loop
220 bx lr
221 .size map_section, .-map_section
222
223/*
224 * void enable_mmu(void);
225 */
226 .section .text, "ax", %progbits
227 .align 2
228 .global enable_mmu
229 .type enable_mmu, %function
230enable_mmu:
231 mov r0, #0 @
232 mcr p15, 0, r0, c8, c7, 0 @ invalidate TLB
233 mcr p15, 0, r0, c7, c7,0 @ invalidate both i and dcache
234 mrc p15, 0, r0, c1, c0, 0 @
235 orr r0, r0, #1 @ enable mmu bit, i and dcache
236 orr r0, r0, #1<<2 @ enable dcache
237 orr r0, r0, #1<<12 @ enable icache
238 mcr p15, 0, r0, c1, c0, 0 @
239 nop @
240 nop @
241 nop @
242 nop @
243 bx lr @
244 .size enable_mmu, .-enable_mmu
245 .ltorg
246
247/** Cache coherency **/
248
249/*
250 * Invalidate DCache for this range
251 * will do write back
252 * void invalidate_dcache_range(const void *base, unsigned int size);
253 */
254 .section .text, "ax", %progbits
255 .align 2
256 .global invalidate_dcache_range
257 .type invalidate_dcache_range, %function
258 @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
259invalidate_dcache_range:
260 add r1, r0, r1 @ size -> end
261 cmp r1, r0 @ end <= start?
262 bxls lr @
263 bic r0, r0, #31 @ Align start to cache line (down)
2641: @ inv_start @
265 mcr p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
266 add r0, r0, #32 @
267 cmp r1, r0 @
268 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
269 addhi r0, r0, #32 @
270 cmphi r1, r0 @
271 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
272 addhi r0, r0, #32 @
273 cmphi r1, r0 @
274 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
275 addhi r0, r0, #32 @
276 cmphi r1, r0 @
277 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
278 addhi r0, r0, #32 @
279 cmphi r1, r0 @
280 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
281 addhi r0, r0, #32 @
282 cmphi r1, r0 @
283 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
284 addhi r0, r0, #32 @
285 cmphi r1, r0 @
286 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
287 addhi r0, r0, #32 @
288 cmphi r1, r0 @
289 bhi 1b @ inv_start @
290 mov r0, #0 @
291 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
292 bx lr @
293 .size invalidate_dcache_range, .-invalidate_dcache_range
294
295/*
296 * clean DCache for this range
297 * forces DCache writeback for the specified range
298 * void clean_dcache_range(const void *base, unsigned int size);
299 */
300 .section .text, "ax", %progbits
301 .align 2
302 .global clean_dcache_range
303 .type clean_dcache_range, %function
304 @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
305clean_dcache_range:
306 add r1, r0, r1 @ size -> end
307 cmp r1, r0 @ end <= start?
308 bxls lr @
309 bic r0, r0, #31 @ Align start to cache line (down)
3101: @ clean_start @
311 mcr p15, 0, r0, c7, c10, 1 @ Clean line by MVA
312 add r0, r0, #32 @
313 cmp r1, r0 @
314 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
315 addhi r0, r0, #32 @
316 cmphi r1, r0 @
317 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
318 addhi r0, r0, #32 @
319 cmphi r1, r0 @
320 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
321 addhi r0, r0, #32 @
322 cmphi r1, r0 @
323 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
324 addhi r0, r0, #32 @
325 cmphi r1, r0 @
326 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
327 addhi r0, r0, #32 @
328 cmphi r1, r0 @
329 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
330 addhi r0, r0, #32 @
331 cmphi r1, r0 @
332 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
333 addhi r0, r0, #32 @
334 cmphi r1, r0 @
335 bhi 1b @clean_start @
336 mov r0, #0 @
337 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
338 bx lr @
339 .size clean_dcache_range, .-clean_dcache_range
340
341/*
342 * Dump DCache for this range
343 * will *NOT* do write back except for buffer edges not on a line boundary
344 * void dump_dcache_range(const void *base, unsigned int size);
345 */
346 .section .text, "ax", %progbits
347 .align 2
348 .global dump_dcache_range
349 .type dump_dcache_range, %function
350 @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
351 dump_dcache_range:
352 add r1, r0, r1 @ size -> end
353 cmp r1, r0 @ end <= start?
354 bxls lr @
355 tst r0, #31 @ Check first line for bits set
356 bicne r0, r0, #31 @ Clear low five bits (down)
357 mcrne p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
358 @ if not cache aligned
359 addne r0, r0, #32 @ Move to the next cache line
360 @
361 tst r1, #31 @ Check last line for bits set
362 bicne r1, r1, #31 @ Clear low five bits (down)
363 mcrne p15, 0, r1, c7, c14, 1 @ Clean and invalidate line by MVA
364 @ if not cache aligned
365 cmp r1, r0 @ end <= start now?
3661: @ dump_start @
367 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
368 addhi r0, r0, #32 @
369 cmphi r1, r0 @
370 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
371 addhi r0, r0, #32 @
372 cmphi r1, r0 @
373 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
374 addhi r0, r0, #32 @
375 cmphi r1, r0 @
376 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
377 addhi r0, r0, #32 @
378 cmphi r1, r0 @
379 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
380 addhi r0, r0, #32 @
381 cmphi r1, r0 @
382 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
383 addhi r0, r0, #32 @
384 cmphi r1, r0 @
385 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
386 addhi r0, r0, #32 @
387 cmphi r1, r0 @
388 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
389 addhi r0, r0, #32 @
390 cmphi r1, r0 @
391 bhi 1b @ dump_start @
392 mov r0, #0 @
393 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
394 bx lr @
395 .size dump_dcache_range, .-dump_dcache_range
396
397/*
398 * Cleans entire DCache
399 * void clean_dcache(void);
400 */
401 .section .text, "ax", %progbits
402 .align 2
403 .global clean_dcache
404 .type clean_dcache, %function
405 .global cpucache_flush @ Alias
406clean_dcache:
407cpucache_flush:
408 @ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
409 mov r0, #0x00000000 @
4101: @ clean_start @
411 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
412 add r0, r0, #0x00000020 @
413 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
414 add r0, r0, #0x00000020 @
415 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
416 add r0, r0, #0x00000020 @
417 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
418 add r0, r0, #0x00000020 @
419 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
420 add r0, r0, #0x00000020 @
421 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
422 add r0, r0, #0x00000020 @
423 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
424 add r0, r0, #0x00000020 @
425 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
426 sub r0, r0, #0x000000e0 @
427 adds r0, r0, #0x04000000 @ will wrap to zero at loop end
428 bne 1b @ clean_start @
429 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
430 bx lr @
431 .size clean_dcache, .-clean_dcache
432
433/*
434 * Invalidate entire DCache
435 * will do writeback
436 * void invalidate_dcache(void);
437 */
438 .section .text, "ax", %progbits
439 .align 2
440 .global invalidate_dcache
441 .type invalidate_dcache, %function
442invalidate_dcache:
443 @ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
444 mov r0, #0x00000000 @
4451: @ inv_start @
446 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
447 add r0, r0, #0x00000020 @
448 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
449 add r0, r0, #0x00000020 @
450 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
451 add r0, r0, #0x00000020 @
452 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
453 add r0, r0, #0x00000020 @
454 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
455 add r0, r0, #0x00000020 @
456 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
457 add r0, r0, #0x00000020 @
458 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
459 add r0, r0, #0x00000020 @
460 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
461 sub r0, r0, #0x000000e0 @
462 adds r0, r0, #0x04000000 @ will wrap to zero at loop end
463 bne 1b @ inv_start @
464 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
465 bx lr @
466 .size invalidate_dcache, .-invalidate_dcache
467
468/*
469 * Invalidate entire ICache and DCache
470 * will do writeback
471 * void invalidate_idcache(void);
472 */
473 .section .text, "ax", %progbits
474 .align 2
475 .global invalidate_idcache
476 .type invalidate_idcache, %function
477 .global cpucache_invalidate @ Alias
478invalidate_idcache:
479cpucache_invalidate:
480 mov r1, lr @ save lr to r1, call uses r0 only
481 bl invalidate_dcache @ Clean and invalidate entire DCache
482 mcr p15, 0, r0, c7, c5, 0 @ Invalidate ICache (r0=0 from call)
483 mov pc, r1 @
484 .size invalidate_idcache, .-invalidate_idcache
485
486#endif /* !IMX31L */
487
diff --git a/firmware/target/arm/mmu-arm.c b/firmware/target/arm/mmu-arm.c
deleted file mode 100644
index fae7fd0b8f..0000000000
--- a/firmware/target/arm/mmu-arm.c
+++ /dev/null
@@ -1,322 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2006,2007 by Greg White
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#include "cpu.h"
22#include "mmu-arm.h"
23#include "panic.h"
24
25void __attribute__((naked)) ttb_init(void) {
26 asm volatile
27 (
28 "mcr p15, 0, %[ttbB], c2, c0, 0 \n" /* Set the TTB base address */
29 "mcr p15, 0, %[ffff], c3, c0, 0 \n" /* Set all domains to manager status */
30 "bx lr \n"
31 :
32 : [ttbB] "r" (TTB_BASE),
33 [ffff] "r" (0xFFFFFFFF)
34 );
35}
36
37void __attribute__((naked)) map_section(unsigned int pa, unsigned int va, int mb, int flags) {
38 asm volatile
39 (
40 /* pa &= (-1 << 20); // align to 1MB */
41 "mov r0, r0, lsr #20 \n"
42 "mov r0, r0, lsl #20 \n"
43
44 /* pa |= (flags | 0x412);
45 * bit breakdown:
46 * 10: superuser - r/w, user - no access
47 * 4: should be "1"
48 * 3,2: Cache flags (flags (r3))
49 * 1: Section signature
50 */
51
52 "orr r0, r0, r3 \n"
53 "orr r0, r0, #0x410 \n"
54 "orr r0, r0, #0x2 \n"
55 :
56 :
57 );
58
59 register unsigned long *ttb_base asm ("r3") = TTB_BASE; /* force in r3 */
60
61 asm volatile
62 (
63 /* unsigned int* ttbPtr = TTB_BASE + (va >> 20);
64 * sections are 1MB size
65 */
66
67 "mov r1, r1, lsr #20 \n"
68 "add r1, %[ttbB], r1, lsl #0x2 \n"
69
70 /* Add MB to pa, flags are already present in pa, but addition
 71 * should not affect them
72 *
73 * #define MB (1 << 20)
74 * for( ; mb>0; mb--, pa += MB)
75 * {
76 * *(ttbPtr++) = pa;
77 * }
78 * #undef MB
79 */
80
81 "cmp r2, #0 \n"
82 "bxle lr \n"
83 "mov r3, #0x0 \n"
84 "loop: \n"
85 "str r0, [r1], #4 \n"
86 "add r0, r0, #0x100000 \n"
87 "add r3, r3, #0x1 \n"
88 "cmp r2, r3 \n"
89 "bne loop \n"
90 "bx lr \n"
91 :
92 : [ttbB] "r" (ttb_base) /* This /HAS/ to be in r3 */
93 );
94 (void) pa;
95 (void) va;
96 (void) mb;
97 (void) flags;
98}
99
100void __attribute__((naked)) enable_mmu(void) {
101 asm volatile(
102 "mov r0, #0 \n"
103 "mcr p15, 0, r0, c8, c7, 0 \n" /* invalidate TLB */
104 "mcr p15, 0, r0, c7, c7,0 \n" /* invalidate both icache and dcache */
105 "mrc p15, 0, r0, c1, c0, 0 \n"
106 "orr r0, r0, #1 \n" /* enable mmu bit, icache and dcache */
107 "orr r0, r0, #1<<2 \n" /* enable dcache */
108 "orr r0, r0, #1<<12 \n" /* enable icache */
109 "mcr p15, 0, r0, c1, c0, 0 \n"
110 "nop \n"
111 "nop \n"
112 "nop \n"
113 "nop \n"
114 "bx lr \n"
115 :
116 :
117 : "r0"
118 );
119}
120
121#if CONFIG_CPU == IMX31L
122void __attribute__((naked)) invalidate_dcache_range(const void *base, unsigned int size)
123{
124 asm volatile(
125 "add r1, r1, r0 \n"
126 "mov r2, #0 \n"
127 "mcrr p15, 0, r1, r0, c14 \n" /* Clean and invalidate dcache range */
128 "mcr p15, 0, r2, c7, c10, 4 \n" /* Data synchronization barrier */
129 "bx lr \n"
130 );
131 (void)base; (void)size;
132}
133#else
134/* Invalidate DCache for this range */
135/* Will do write back */
136void invalidate_dcache_range(const void *base, unsigned int size) {
137 unsigned int addr = (((int) base) & ~31); /* Align start to cache line*/
138 unsigned int end = ((addr+size) & ~31)+64; /* Align end to cache line, pad */
139 asm volatile(
140 "inv_start: \n"
141 "mcr p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
142 "add %0, %0, #32 \n"
143 "cmp %0, %1 \n"
144 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
145 "addne %0, %0, #32 \n"
146 "cmpne %0, %1 \n"
147 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
148 "addne %0, %0, #32 \n"
149 "cmpne %0, %1 \n"
150 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
151 "addne %0, %0, #32 \n"
152 "cmpne %0, %1 \n"
153 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
154 "addne %0, %0, #32 \n"
155 "cmpne %0, %1 \n"
156 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
157 "addne %0, %0, #32 \n"
158 "cmpne %0, %1 \n"
159 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
160 "addne %0, %0, #32 \n"
161 "cmpne %0, %1 \n"
162 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
163 "addne %0, %0, #32 \n"
164 "cmpne %0, %1 \n"
165 "bne inv_start \n"
166 "mov %0, #0\n"
167 "mcr p15,0,%0,c7,c10,4\n" /* Drain write buffer */
168 : : "r" (addr), "r" (end)
169 );
170}
171#endif
172
173
174#if CONFIG_CPU == IMX31L
175void __attribute__((naked)) clean_dcache_range(const void *base, unsigned int size)
176{
177 asm volatile(
178 "add r1, r1, r0 \n"
179 "mov r2, #0 \n"
180 "mcrr p15, 0, r1, r0, c12 \n" /* Clean dcache range */
181 "mcr p15, 0, r2, c7, c10, 4 \n" /* Data synchronization barrier */
182 "bx lr \n"
183 );
184 (void)base; (void)size;
185}
186#else
187/* clean DCache for this range */
188/* forces DCache writeback for the specified range */
189void clean_dcache_range(const void *base, unsigned int size) {
190 unsigned int addr = (int) base;
191 unsigned int end = addr+size+32;
192 asm volatile(
193 "bic %0, %0, #31 \n"
194"clean_start: \n"
195 "mcr p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
196 "add %0, %0, #32 \n"
197 "cmp %0, %1 \n"
198 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
199 "addlo %0, %0, #32 \n"
200 "cmplo %0, %1 \n"
201 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
202 "addlo %0, %0, #32 \n"
203 "cmplo %0, %1 \n"
204 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
205 "addlo %0, %0, #32 \n"
206 "cmplo %0, %1 \n"
207 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
208 "addlo %0, %0, #32 \n"
209 "cmplo %0, %1 \n"
210 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
211 "addlo %0, %0, #32 \n"
212 "cmplo %0, %1 \n"
213 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
214 "addlo %0, %0, #32 \n"
215 "cmplo %0, %1 \n"
216 "mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
217 "addlo %0, %0, #32 \n"
218 "cmplo %0, %1 \n"
219 "blo clean_start \n"
220 "mov %0, #0\n"
221 "mcr p15,0,%0,c7,c10,4 \n" /* Drain write buffer */
222 : : "r" (addr), "r" (end));
223}
224#endif
225
226#if CONFIG_CPU == IMX31L
227void __attribute__((naked)) dump_dcache_range(const void *base, unsigned int size)
228{
229 asm volatile(
230 "add r1, r1, r0 \n"
231 "mcrr p15, 0, r1, r0, c6 \n"
232 "bx lr \n"
233 );
234 (void)base; (void)size;
235}
236#else
237/* Dump DCache for this range */
238/* Will *NOT* do write back */
239void dump_dcache_range(const void *base, unsigned int size) {
240 unsigned int addr = (int) base;
241 unsigned int end = addr+size;
242 asm volatile(
243 "tst %0, #31 \n" /* Check to see if low five bits are set */
244 "bic %0, %0, #31 \n" /* Clear them */
245 "mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line, if those bits were set */
246 "add %0, %0, #32 \n" /* Move to the next cache line */
247 "tst %1, #31 \n" /* Check last line for bits set */
248 "bic %1, %1, #31 \n" /* Clear those bits */
249 "mcrne p15, 0, %1, c7, c14, 1 \n" /* Clean and invalidate this line, if not cache aligned */
250"dump_start: \n"
251 "mcr p15, 0, %0, c7, c6, 1 \n" /* Invalidate this line */
252 "add %0, %0, #32 \n" /* Next cache line */
253 "cmp %0, %1 \n"
254 "bne dump_start \n"
255"dump_end: \n"
256 "mcr p15,0,%0,c7,c10,4 \n" /* Drain write buffer */
257 : : "r" (addr), "r" (end));
258}
259#endif
260
261#if CONFIG_CPU == IMX31L
262void __attribute__((naked)) clean_dcache(void)
263{
264 asm volatile (
265 /* Clean entire data cache */
266 "mov r0, #0 \n"
267 "mcr p15, 0, r0, c7, c10, 0 \n"
268 /* Data synchronization barrier */
269 "mcr p15, 0, r0, c7, c10, 4 \n"
270 "bx lr \n"
271 );
272}
273#else
274/* Cleans entire DCache */
275void clean_dcache(void)
276{
277 unsigned int index, addr, low;
278
279 for(index = 0; index <= 63; index++)
280 {
281 for(low = 0;low <= 7; low++)
282 {
283 addr = (index << 26) | (low << 5);
284 asm volatile
285 (
286 "mcr p15, 0, %[addr], c7, c10, 2 \n" /* Clean this entry by index */
287 :
288 : [addr] "r" (addr)
289 );
290 }
291 }
292}
293#endif
294
295#if CONFIG_CPU == IMX31L
296void invalidate_idcache(void)
297{
298 asm volatile(
299 /* Clean and invalidate entire data cache */
300 "mcr p15, 0, %0, c7, c14, 0 \n"
301 /* Invalidate entire instruction cache
302 * Also flushes the branch target cache */
303 "mcr p15, 0, %0, c7, c5, 0 \n"
304 /* Data synchronization barrier */
305 "mcr p15, 0, %0, c7, c10, 4 \n"
306 /* Flush prefetch buffer */
307 "mcr p15, 0, %0, c7, c5, 4 \n"
308 : : "r"(0)
309 );
310}
311#else
312void invalidate_idcache(void)
313{
314 clean_dcache();
315 asm volatile(
316 "mov r0, #0 \n"
317 "mcr p15, 0, r0, c7, c5, 0 \n"
318 : : : "r0"
319 );
320}
321#endif
322
diff --git a/firmware/target/arm/mmu-arm.h b/firmware/target/arm/mmu-arm.h
index b6b23e6185..7058fffddc 100644
--- a/firmware/target/arm/mmu-arm.h
+++ b/firmware/target/arm/mmu-arm.h
@@ -18,11 +18,14 @@
18 * KIND, either express or implied. 18 * KIND, either express or implied.
19 * 19 *
20 ****************************************************************************/ 20 ****************************************************************************/
21#ifndef MMU_ARM_H
 22#define MMU_ARM_H
21 23
22#define CACHE_ALL 0x0C 24#define CACHE_ALL 0x0C
23#define CACHE_NONE 0 25#define CACHE_NONE 0
24#define BUFFERED 0x04 26#define BUFFERED 0x04
25 27
28void memory_init(void);
26void ttb_init(void); 29void ttb_init(void);
27void enable_mmu(void); 30void enable_mmu(void);
28void map_section(unsigned int pa, unsigned int va, int mb, int flags); 31void map_section(unsigned int pa, unsigned int va, int mb, int flags);
@@ -30,8 +33,12 @@ void map_section(unsigned int pa, unsigned int va, int mb, int flags);
30/* Cleans entire DCache */ 33/* Cleans entire DCache */
31void clean_dcache(void); 34void clean_dcache(void);
32 35
36/* Invalidate entire DCache */
37/* will do writeback */
38void invalidate_dcache(void);
39
33/* Invalidate DCache for this range */ 40/* Invalidate DCache for this range */
34/* Will do write back */ 41/* will do writeback */
35void invalidate_dcache_range(const void *base, unsigned int size); 42void invalidate_dcache_range(const void *base, unsigned int size);
36 43
37/* clean DCache for this range */ 44/* clean DCache for this range */
@@ -39,7 +46,14 @@ void invalidate_dcache_range(const void *base, unsigned int size);
39void clean_dcache_range(const void *base, unsigned int size); 46void clean_dcache_range(const void *base, unsigned int size);
40 47
41/* Dump DCache for this range */ 48/* Dump DCache for this range */
42/* Will *NOT* do write back */ 49/* Will *NOT* do write back except for buffer ends not on a line boundary */
43void dump_dcache_range(const void *base, unsigned int size); 50void dump_dcache_range(const void *base, unsigned int size);
44 51
45void memory_init(void); 52/* Invalidate entire ICache and DCache */
53/* will do writeback */
54void invalidate_idcache(void);
55
56#define HAVE_CPUCACHE_INVALIDATE
57#define HAVE_CPUCACHE_FLUSH
58
59#endif /* MMU_ARM_H */
diff --git a/firmware/target/arm/s3c2440/gigabeat-fx/system-target.h b/firmware/target/arm/s3c2440/gigabeat-fx/system-target.h
index aa7c0aa50c..f310b9ac26 100644
--- a/firmware/target/arm/s3c2440/gigabeat-fx/system-target.h
+++ b/firmware/target/arm/s3c2440/gigabeat-fx/system-target.h
@@ -41,9 +41,4 @@ void s3c_regset32(volatile unsigned long *reg, unsigned long bits);
41/* Clear register bits */ 41/* Clear register bits */
42void s3c_regclr32(volatile unsigned long *reg, unsigned long bits); 42void s3c_regclr32(volatile unsigned long *reg, unsigned long bits);
43 43
44#define HAVE_CPUCACHE_FLUSH
45#define HAVE_CPUCACHE_INVALIDATE
46#define cpucache_flush clean_dcache
47#define cpucache_invalidate invalidate_idcache
48
49#endif /* SYSTEM_TARGET_H */ 44#endif /* SYSTEM_TARGET_H */