path: root/firmware/target/arm/mmu-arm.S
Diffstat (limited to 'firmware/target/arm/mmu-arm.S')
-rw-r--r--  firmware/target/arm/mmu-arm.S  38
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/firmware/target/arm/mmu-arm.S b/firmware/target/arm/mmu-arm.S
index 947e96b75e..0119b26133 100644
--- a/firmware/target/arm/mmu-arm.S
+++ b/firmware/target/arm/mmu-arm.S
@@ -22,7 +22,6 @@
 #include "cpu.h"
 
 /* Used by ARMv4 & ARMv5 CPUs with cp15 register and MMU */
-/* WARNING : assume size of a data cache line == 32 bytes */
 
 #if CONFIG_CPU == TCC7801 || CONFIG_CPU == AT91SAM9260
 /* MMU present but unused */
@@ -40,19 +39,38 @@
 #define USE_MMU
 #define CACHE_SIZE 16
 
+#elif CONFIG_CPU == S5L8701
+/* MMU not present */
+#define CACHE_SIZE 4
+
 #else
 #error Cache settings unknown for this CPU !
 
 #endif /* CPU specific configuration */
 
 @ Index format: 31:26 = index, N:5 = segment, remainder = SBZ
-@ assume 64-way set associative separate I/D caches, 32B (2^5) cache line size
+@ assume 64-way set associative separate I/D caches
 @ CACHE_SIZE = N (kB) = N*2^10 B
-@ number of lines = N*2^(10-5) = N*2^(5)
+@ number of lines = N*2^(10-CACHEALIGN_BITS)
 @ Index bits = 6
-@ Segment loops = N*2^(5-6) = N*2^(-1) = N/2
+@ Segment loops = N*2^(10-CACHEALIGN_BITS-6) = N*2^(4-CACHEALIGN_BITS)
+@ Segment loops = N/2^(CACHEALIGN_BITS - 4)
+@ Segment loops = N/(1<<(CACHEALIGN_BITS - 4))
+
+#ifdef CACHE_SIZE
 
+#if CACHEALIGN_BITS == 4
+#define INDEX_STEPS CACHE_SIZE
+#elif CACHEALIGN_BITS == 5
 #define INDEX_STEPS (CACHE_SIZE/2)
+#endif /* CACHEALIGN_BITS */
+
+@ assume 64-way set associative separate I/D caches (log2(64) == 6)
+@ Index format: 31:26 = index, M:N = segment, remainder = SBZ
+@ Segment bits = log2(cache size in bytes / cache line size in bytes) - Index bits (== 6)
+@ N = CACHEALIGN_BITS
+
+#endif /* CACHE_SIZE */
 
 
 #ifdef USE_MMU
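
A quick way to sanity-check the INDEX_STEPS arithmetic in the comments above is to compute it directly from CACHE_SIZE and CACHEALIGN_BITS. The following C sketch is illustrative only (index_steps() is a hypothetical helper, not part of the firmware) and assumes, as the new #if branches imply, 32-byte lines (CACHEALIGN_BITS == 5) for the 16 kB caches and 16-byte lines (CACHEALIGN_BITS == 4) on the S5L8701:

#include <assert.h>
#include <stdio.h>

/* Segment loops = N*2^(10 - CACHEALIGN_BITS - 6): the number of
 * segments walked for each of the 64 index values of a 64-way
 * set associative cache. */
static unsigned index_steps(unsigned cache_size_kb, unsigned cachealign_bits)
{
    unsigned cache_bytes = cache_size_kb << 10;        /* N kB = N*2^10 B  */
    unsigned lines = cache_bytes >> cachealign_bits;   /* N*2^(10-A) lines */
    return lines >> 6;                                 /* / 64 ways        */
}

int main(void)
{
    /* 16 kB cache, 32 B lines: matches #define INDEX_STEPS (CACHE_SIZE/2) */
    assert(index_steps(16, 5) == 16 / 2);
    /* 4 kB cache, 16 B lines: matches #define INDEX_STEPS CACHE_SIZE */
    assert(index_steps(4, 4) == 4);
    printf("INDEX_STEPS arithmetic checks out\n");
    return 0;
}
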
@@ -318,15 +336,13 @@ cpucache_flush:
     bne     clean_dcache
     mov     r1, #0
 #else
-    @ Index format: 31:26 = index, N:5 = segment, remainder = SBZ, assume 64-way set associative separate I/D caches
-    @ N = log2(cache size in bytes / cache line size in bytes == 32) - 6 /* index bits */ + 4 /* start offset */
     mov     r1, #0x00000000         @
 1:  @ clean_start                   @
     mcr     p15, 0, r1, c7, c10, 2  @ Clean entry by index
-    add     r0, r1, #0x00000020     @
+    add     r0, r1, #(1<<CACHEALIGN_BITS)
     mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
 .rept INDEX_STEPS - 2 /* 2 steps already executed */
-    add     r0, r0, #0x00000020     @
+    add     r0, r0, #(1<<CACHEALIGN_BITS)
     mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
 .endr
     adds    r1, r1, #0x04000000     @ will wrap to zero at loop end
@@ -351,15 +367,13 @@ invalidate_dcache:
     bne     invalidate_dcache
     mov     r1, #0
 #else
-    @ Index format: 31:26 = index, N:5 = segment, remainder = SBZ, assume 64-way set associative separate I/D caches
-    @ N = log2(cache size in bytes / cache line size in bytes == 32) - 6 /* index bits */ + 4 /* start offset */
     mov     r1, #0x00000000         @
 1:  @ inv_start                     @
     mcr     p15, 0, r1, c7, c14, 2  @ Clean and invalidate entry by index
-    add     r0, r1, #0x00000020     @
+    add     r0, r1, #(1<<CACHEALIGN_BITS)
     mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
 .rept INDEX_STEPS - 2 /* 2 steps already executed */
-    add     r0, r0, #0x00000020     @
+    add     r0, r0, #(1<<CACHEALIGN_BITS)
     mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
 .endr
     adds    r1, r1, #0x04000000     @ will wrap to zero at loop end
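
The clean loop and the clean-and-invalidate loop above share the same set/way walk: bits 31:26 of the register hold the index, the segment starts at bit CACHEALIGN_BITS and is stepped by one cache line, and the flag-setting adds wraps r1 back to zero after the 64th index, ending the loop. A minimal C sketch of that traversal, with cache_op() standing in as a placeholder for the "mcr p15, 0, rX, c7, c10/c14, 2" instruction (constants assume the 16 kB, 32-byte-line configuration):

#include <stdint.h>
#include <stdio.h>

#define CACHEALIGN_BITS 5          /* assumed: 32-byte cache lines */
#define INDEX_STEPS     (16 / 2)   /* assumed: CACHE_SIZE == 16    */

static void cache_op(uint32_t val) /* placeholder for the MCR */
{
    printf("index %2u, segment %u\n",
           (unsigned)(val >> 26),
           (unsigned)((val & 0x03ffffffu) >> CACHEALIGN_BITS));
}

int main(void)
{
    uint32_t r1 = 0;                        /* index in bits 31:26 */
    do {
        uint32_t r0 = r1;
        for (int i = 0; i < INDEX_STEPS; i++) {
            cache_op(r0);                   /* clean (c10) or clean
                                             * and invalidate (c14) */
            r0 += 1u << CACHEALIGN_BITS;    /* next segment */
        }
        r1 += 0x04000000u;  /* next index; unsigned wrap to 0 after 64 */
    } while (r1 != 0);      /* the asm branches on the adds flags instead */
    return 0;
}

For the 16 kB case this issues 64 * 8 == 512 operations, one per cache line, matching "number of lines = N*2^(10-CACHEALIGN_BITS)" from the header comments.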