path: root/firmware/target/arm/mmu-arm.S
Diffstat (limited to 'firmware/target/arm/mmu-arm.S')
-rw-r--r--  firmware/target/arm/mmu-arm.S  487
1 files changed, 487 insertions, 0 deletions
diff --git a/firmware/target/arm/mmu-arm.S b/firmware/target/arm/mmu-arm.S
new file mode 100644
index 0000000000..2223be2210
--- /dev/null
+++ b/firmware/target/arm/mmu-arm.S
@@ -0,0 +1,487 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/  \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2006,2007 by Greg White
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include "cpu.h"

#if CONFIG_CPU == IMX31L
/* TTB routines not used */

/** Cache coherency **/

/*
 * Invalidate DCache for this range
 * will do writeback
 * void invalidate_dcache_range(const void *base, unsigned int size);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  invalidate_dcache_range
    .type    invalidate_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
invalidate_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    subhi   r1, r1, #1              @ round it down
    movhi   r2, #0                  @
    mcrrhi  p15, 0, r1, r0, c14     @ Clean and invalidate DCache range
    mcrhi   p15, 0, r2, c7, c10, 4  @ Data synchronization barrier
    bx      lr                      @
    .size   invalidate_dcache_range, .-invalidate_dcache_range
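
/*
 * Typical use is around a DMA transfer that writes to memory behind the
 * cache. A minimal sketch (the DMA helpers and buffer are hypothetical,
 * not part of this file):
 *
 *   static unsigned char dma_buf[512];
 *
 *   void read_sector(void)
 *   {
 *       start_dma_read(dma_buf, sizeof(dma_buf));     // device -> memory
 *       wait_dma_done();
 *       // discard stale cached copies before the CPU reads the data
 *       invalidate_dcache_range(dma_buf, sizeof(dma_buf));
 *   }
 *
 * Note that this implementation cleans before invalidating, so dirty lines
 * overlapping the buffer are written back rather than silently lost.
 */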

/*
 * Clean DCache for this range
 * forces DCache writeback for the specified range
 * void clean_dcache_range(const void *base, unsigned int size);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  clean_dcache_range
    .type    clean_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
clean_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    subhi   r1, r1, #1              @ round it down
    movhi   r2, #0                  @
    mcrrhi  p15, 0, r1, r0, c12     @ Clean DCache range
    mcrhi   p15, 0, r2, c7, c10, 4  @ Data synchronization barrier
    bx      lr                      @
    .size   clean_dcache_range, .-clean_dcache_range
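
/*
 * clean_dcache_range() is the writeback half: call it before a DMA engine
 * reads from memory, so the device sees the CPU's latest data. A sketch
 * under the same hypothetical DMA API as above:
 *
 *   memcpy(dma_buf, data, sizeof(dma_buf));         // CPU writes (cached)
 *   clean_dcache_range(dma_buf, sizeof(dma_buf));   // push to memory
 *   start_dma_write(dma_buf, sizeof(dma_buf));      // memory -> device
 */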

/*
 * Dump DCache for this range
 * will *NOT* do writeback, except for buffer edges not on a line boundary
 * void dump_dcache_range(const void *base, unsigned int size);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  dump_dcache_range
    .type    dump_dcache_range, %function
    @ MVA format (mcr):  31:5 = Modified virtual address, 4:0 = SBZ
    @ MVA format (mcrr): 31:5 = Modified virtual address, 4:0 = Ignored
dump_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @
    tst     r0, #31                 @ Check first line for bits set
    bicne   r0, r0, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    addne   r0, r0, #32             @ Move to the next cache line
                                    @
    tst     r1, #31                 @ Check last line for bits set
    bicne   r1, r1, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r1, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    sub     r1, r1, #32             @ Move to the previous cache line
    cmp     r1, r0                  @ end < start now?
    mcrrhs  p15, 0, r1, r0, c6      @ Invalidate DCache range
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Data synchronization barrier
    bx      lr                      @
    .size   dump_dcache_range, .-dump_dcache_range
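
/*
 * dump_dcache_range() discards cached data without writing it back, which
 * is only safe when the buffer contents are about to be overwritten (for
 * example a receive buffer being recycled). The partial lines at either
 * edge are cleaned as well, since they may share cache lines with
 * unrelated data. A hedged sketch with the hypothetical DMA API above:
 *
 *   dump_dcache_range(dma_buf, sizeof(dma_buf));  // drop stale contents
 *   start_dma_read(dma_buf, sizeof(dma_buf));     // refill from device
 */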


/*
 * Cleans entire DCache
 * void clean_dcache(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  clean_dcache
    .type    clean_dcache, %function
    .global  cpucache_flush         @ Alias
clean_dcache:
cpucache_flush:
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 0  @ Clean entire DCache
    mcr     p15, 0, r0, c7, c10, 4  @ Data synchronization barrier
    bx      lr                      @
    .size   clean_dcache, .-clean_dcache

/*
 * Invalidate entire DCache
 * will do writeback
 * void invalidate_dcache(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  invalidate_dcache
    .type    invalidate_dcache, %function
invalidate_dcache:
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c14, 0  @ Clean and invalidate entire DCache
    mcr     p15, 0, r0, c7, c10, 4  @ Data synchronization barrier
    bx      lr                      @
    .size   invalidate_dcache, .-invalidate_dcache

/*
 * Invalidate entire ICache and DCache
 * will do writeback
 * void invalidate_idcache(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  invalidate_idcache
    .type    invalidate_idcache, %function
    .global  cpucache_invalidate    @ Alias
invalidate_idcache:
cpucache_invalidate:
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c14, 0  @ Clean and invalidate entire DCache
    mcr     p15, 0, r0, c7, c5, 0   @ Invalidate entire ICache
                                    @ Also flushes the branch target cache
    mcr     p15, 0, r0, c7, c10, 4  @ Data synchronization barrier
    mcr     p15, 0, r0, c7, c5, 4   @ Flush prefetch buffer (IMB)
    bx      lr                      @
    .size   invalidate_idcache, .-invalidate_idcache

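/*
 * invalidate_idcache() is the heavyweight barrier used after writing code
 * to memory (e.g. loading a firmware image): the DCache writeback makes
 * the new instructions visible to memory, and the ICache/prefetch flush
 * forces the core to refetch them. A sketch with a hypothetical loader:
 *
 *   void (*entry)(void) = load_image_to_ram();  // CPU writes code bytes
 *   cpucache_invalidate();                      // sync I/D caches
 *   entry();                                    // safe to execute now
 */
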
#else /* !IMX31L */
/** MMU setup **/

/*
 * void ttb_init(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  ttb_init
    .type    ttb_init, %function
ttb_init:
    ldr     r0, =TTB_BASE_ADDR      @
    mvn     r1, #0                  @
    mcr     p15, 0, r0, c2, c0, 0   @ Set the TTB base address
    mcr     p15, 0, r1, c3, c0, 0   @ Set all domains to manager status
    bx      lr                      @
    .size   ttb_init, .-ttb_init
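
/*
 * The first-level table at TTB_BASE_ADDR holds 4096 word-sized section
 * descriptors, one per MB of virtual space, so it must be 16KB long and
 * 16KB-aligned. Setting all domains to "manager" bypasses the access
 * permission checks, which keeps the descriptors built by map_section()
 * simple. C equivalent of the routine above (TTB_BASE_ADDR comes from the
 * target's cpu.h):
 *
 *   asm volatile("mcr p15, 0, %0, c2, c0, 0" :: "r"(TTB_BASE_ADDR));
 *   asm volatile("mcr p15, 0, %0, c3, c0, 0" :: "r"(0xffffffff));
 */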

/*
 * void map_section(unsigned int pa, unsigned int va, int mb, int flags);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  map_section
    .type    map_section, %function
map_section:
    @ align to 1MB
    @ pa &= (-1 << 20);
    mov     r0, r0, lsr #20
    mov     r0, r0, lsl #20

    @ pa |= (flags | 0x412);
    @ bit breakdown:
    @  10:   superuser - r/w, user - no access
    @  4:    should be "1"
    @  3,2:  Cache flags (flags (r3))
    @  1:    Section signature
    orr     r0, r0, r3
    orr     r0, r0, #0x410
    orr     r0, r0, #0x2

    @ unsigned int* ttbPtr = TTB_BASE + (va >> 20);
    @ sections are 1MB in size
    mov     r1, r1, lsr #20
    ldr     r3, =TTB_BASE_ADDR
    add     r1, r3, r1, lsl #0x2

    @ Add MB to pa; the flags are already present in pa, but the
    @ addition must not affect them
    @
    @ for( ; mb>0; mb--, pa += (1 << 20))
    @ {
    @     *(ttbPtr++) = pa;
    @ }
    cmp     r2, #0
    bxle    lr
    mov     r3, #0x0
1:  @ loop
    str     r0, [r1], #4
    add     r0, r0, #0x100000
    add     r3, r3, #0x1
    cmp     r2, r3
    bne     1b  @ loop
    bx      lr
    .size   map_section, .-map_section
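
/*
 * Example calls, assuming the flag encoding above (bits 3,2 are the C and
 * B section-descriptor bits, so 0xC selects write-back cached; addresses
 * are illustrative only): identity-map 32MB of SDRAM as cacheable, then
 * map a framebuffer uncached so pixel writes reach the controller
 * immediately:
 *
 *   map_section(0x30000000, 0x30000000, 32, 0xC);  // SDRAM, cached
 *   map_section(0x4c800000, 0x4c800000,  1, 0x0);  // framebuffer, uncached
 */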

/*
 * void enable_mmu(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  enable_mmu
    .type    enable_mmu, %function
enable_mmu:
    mov     r0, #0                  @
    mcr     p15, 0, r0, c8, c7, 0   @ invalidate TLB
    mcr     p15, 0, r0, c7, c7, 0   @ invalidate both ICache and DCache
    mrc     p15, 0, r0, c1, c0, 0   @
    orr     r0, r0, #1              @ enable mmu bit
    orr     r0, r0, #1<<2           @ enable dcache
    orr     r0, r0, #1<<12          @ enable icache
    mcr     p15, 0, r0, c1, c0, 0   @
    nop                             @
    nop                             @
    nop                             @
    nop                             @
    bx      lr                      @
    .size   enable_mmu, .-enable_mmu
    .ltorg
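
/*
 * These three routines are meant to run in order during early init,
 * before any cacheable access to the mapped memory. A sketch of the
 * bring-up sequence (addresses and flags as in the hypothetical example
 * above):
 *
 *   ttb_init();                                    // point MMU at the table
 *   map_section(0x30000000, 0x30000000, 32, 0xC);  // describe memory
 *   enable_mmu();                                  // turn on MMU + caches
 */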

/** Cache coherency **/

/*
 * Invalidate DCache for this range
 * will do writeback
 * void invalidate_dcache_range(const void *base, unsigned int size);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  invalidate_dcache_range
    .type    invalidate_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
invalidate_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @
    bic     r0, r0, #31             @ Align start to cache line (down)
1:  @ inv_start                     @
    mcr     p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    add     r0, r0, #32             @
    cmp     r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    bhi     1b  @ inv_start         @
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   invalidate_dcache_range, .-invalidate_dcache_range

/*
 * Clean DCache for this range
 * forces DCache writeback for the specified range
 * void clean_dcache_range(const void *base, unsigned int size);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  clean_dcache_range
    .type    clean_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
clean_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @
    bic     r0, r0, #31             @ Align start to cache line (down)
1:  @ clean_start                   @
    mcr     p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    add     r0, r0, #32             @
    cmp     r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    bhi     1b  @ clean_start       @
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   clean_dcache_range, .-clean_dcache_range

/*
 * Dump DCache for this range
 * will *NOT* do writeback, except for buffer edges not on a line boundary
 * void dump_dcache_range(const void *base, unsigned int size);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  dump_dcache_range
    .type    dump_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
dump_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @
    tst     r0, #31                 @ Check first line for bits set
    bicne   r0, r0, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    addne   r0, r0, #32             @ Move to the next cache line
                                    @
    tst     r1, #31                 @ Check last line for bits set
    bicne   r1, r1, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r1, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    cmp     r1, r0                  @ end <= start now?
1:  @ dump_start                    @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    bhi     1b  @ dump_start        @
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   dump_dcache_range, .-dump_dcache_range

/*
 * Cleans entire DCache
 * void clean_dcache(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  clean_dcache
    .type    clean_dcache, %function
    .global  cpucache_flush         @ Alias
clean_dcache:
cpucache_flush:
    @ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
    mov     r0, #0x00000000         @
1:  @ clean_start                   @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    sub     r0, r0, #0x000000e0     @
    adds    r0, r0, #0x04000000     @ will wrap to zero at loop end
    bne     1b  @ clean_start       @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   clean_dcache, .-clean_dcache
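
/*
 * The index/segment walk above is clean-by-set/way for an ARM920T-style
 * 16KB DCache: 64 indices (bits 31:26) times 8 segments (bits 7:5) times
 * 32-byte lines. A C rendering of the same traversal, as a sketch
 * assuming that geometry:
 *
 *   for (unsigned idx = 0; idx < 64; idx++)
 *       for (unsigned seg = 0; seg < 8; seg++) {
 *           unsigned sw = (idx << 26) | (seg << 5);
 *           asm volatile("mcr p15, 0, %0, c7, c10, 2" :: "r"(sw));
 *       }
 *   asm volatile("mcr p15, 0, %0, c7, c10, 4" :: "r"(0));  // drain wb
 */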

/*
 * Invalidate entire DCache
 * will do writeback
 * void invalidate_dcache(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  invalidate_dcache
    .type    invalidate_dcache, %function
invalidate_dcache:
    @ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
    mov     r0, #0x00000000         @
1:  @ inv_start                     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r0, #0x00000020     @
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    sub     r0, r0, #0x000000e0     @
    adds    r0, r0, #0x04000000     @ will wrap to zero at loop end
    bne     1b  @ inv_start         @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   invalidate_dcache, .-invalidate_dcache

/*
 * Invalidate entire ICache and DCache
 * will do writeback
 * void invalidate_idcache(void);
 */
    .section .text, "ax", %progbits
    .align   2
    .global  invalidate_idcache
    .type    invalidate_idcache, %function
    .global  cpucache_invalidate    @ Alias
invalidate_idcache:
cpucache_invalidate:
    mov     r1, lr                  @ save lr to r1, call uses r0 only
    bl      invalidate_dcache       @ Clean and invalidate entire DCache
    mcr     p15, 0, r0, c7, c5, 0   @ Invalidate ICache (r0=0 from call)
    mov     pc, r1                  @
    .size   invalidate_idcache, .-invalidate_idcache

#endif /* !IMX31L */