author     Solomon Peachy <pizza@shaftnet.org>    2020-08-28 21:45:58 -0400
committer  Solomon Peachy <pizza@shaftnet.org>    2020-09-03 15:34:28 -0400
commit     0cb162a76b16d58250a33e817af6a763e89a770a (patch)
tree       af5ac50c1ec59f665e0a4845672a16d758b44953 /firmware/target/mips/mmu-mips.c
parent     1ae8213a64c23ac86173b8139e01c7cad350ec6b (diff)
download   rockbox-0cb162a76b16d58250a33e817af6a763e89a770a.tar.gz
           rockbox-0cb162a76b16d58250a33e817af6a763e89a770a.zip
mips: Heavily rework DMA & caching code
Based on code originally written by Amaury Pouly (g#1789, g#1791, g#1527) but rebased and heavily updated.

Change-Id: Ic794abb5e8d89feb4b88fc3abe854270fb28db70
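The commit retires the old __icache_invalidate_all()/__dcache_invalidate_all()/dma_cache_wback_inv() helpers in favour of Rockbox's generic cache-maintenance API (commit_dcache_range(), discard_dcache_range(), commit_discard_dcache() and friends, added in the diff below). As a minimal usage sketch, not part of the patch, this is roughly how a DMA-capable driver would pair transfers with that API; dma_start_write(), dma_start_read() and dma_wait() are hypothetical placeholders for a real target's DMA driver, and the cache prototypes are assumed to be visible through system.h.

    /* Usage sketch only, not from this commit. */
    #include "system.h"

    extern void dma_start_write(const void *buf, unsigned int size); /* hypothetical */
    extern void dma_start_read(void *buf, unsigned int size);        /* hypothetical */
    extern void dma_wait(void);                                      /* hypothetical */

    static void dma_send(const void *buf, unsigned int size)
    {
        /* CPU filled the buffer: write dirty D-cache lines back to RAM
         * so the DMA engine reads current data. */
        commit_dcache_range(buf, size);
        dma_start_write(buf, size);
        dma_wait();
    }

    static void dma_recv(void *buf, unsigned int size)
    {
        /* DMA engine will write RAM behind the CPU's back: drop any cached
         * copies so later CPU reads fetch fresh data from memory. */
        discard_dcache_range(buf, size);
        dma_start_read(buf, size);
        dma_wait();
    }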
Diffstat (limited to 'firmware/target/mips/mmu-mips.c')
-rw-r--r--  firmware/target/mips/mmu-mips.c  185
1 file changed, 122 insertions(+), 63 deletions(-)
diff --git a/firmware/target/mips/mmu-mips.c b/firmware/target/mips/mmu-mips.c
index 552348014e..14a013211d 100644
--- a/firmware/target/mips/mmu-mips.c
+++ b/firmware/target/mips/mmu-mips.c
@@ -5,9 +5,9 @@
  * Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
  * Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
  *                    \/            \/     \/    \/            \/
- * $Id$
  *
  * Copyright (C) 2009 by Maurus Cuelenaere
+ * Copyright (C) 2015 by Marcin Bukat
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -25,8 +25,16 @@
 #include "system.h"
 #include "mmu-mips.h"
 
+#if CONFIG_CPU == JZ4732 || CONFIG_CPU == JZ4760B
+/* XBurst core has 32 JTLB entries */
+#define NR_TLB_ENTRIES 32
+#else
+#error please define NR_TLB_ENTRIES
+#endif
+
 #define BARRIER                                \
     __asm__ __volatile__(                      \
+    "    .set push          \n"                \
     "    .set noreorder     \n"                \
     "    nop                \n"                \
     "    nop                \n"                \
@@ -34,7 +42,7 @@
34 " nop \n" \ 42 " nop \n" \
35 " nop \n" \ 43 " nop \n" \
36 " nop \n" \ 44 " nop \n" \
37 " .set reorder \n"); 45 " .set pop \n");
38 46
39#define DEFAULT_PAGE_SHIFT PL_4K 47#define DEFAULT_PAGE_SHIFT PL_4K
40#define DEFAULT_PAGE_MASK PM_4K 48#define DEFAULT_PAGE_MASK PM_4K
@@ -43,6 +51,7 @@
 #define VPN2_SHIFT S_EntryHiVPN2
 #define PFN_SHIFT S_EntryLoPFN
 #define PFN_MASK 0xffffff
+
 static void local_flush_tlb_all(void)
 {
     unsigned long old_ctx;
@@ -55,10 +64,11 @@ static void local_flush_tlb_all(void)
     write_c0_entrylo1(0);
     BARRIER;
 
-    /* Blast 'em all away. */
-    for(entry = 0; entry < 32; entry++)
+    /* blast all entries except the wired one */
+    for(entry = read_c0_wired(); entry < NR_TLB_ENTRIES; entry++)
     {
-        /* Make sure all entries differ. */
+        /* Make sure all entries differ and are in unmapped space, making them
+         * impossible to match */
         write_c0_entryhi(UNIQUE_ENTRYHI(entry, DEFAULT_PAGE_SHIFT));
         write_c0_index(entry);
         BARRIER;
@@ -119,84 +129,133 @@ void mmu_init(void)
     write_c0_framemask(0);
 
     local_flush_tlb_all();
-/*
-    map_address(0x80000000, 0x80000000, 0x4000, K_CacheAttrC);
-    map_address(0x80004000, 0x80004000, MEMORYSIZE * 0x100000, K_CacheAttrC);
-*/
 }
 
-#define SYNC_WB() __asm__ __volatile__ ("sync")
+/* Target specific operations:
+ * - invalidate BTB (Branch Table Buffer)
+ * - sync barrier after cache operations */
+#if CONFIG_CPU == JZ4732 || CONFIG_CPU == JZ4760B
+#define INVALIDATE_BTB()                   \
+do {                                       \
+    unsigned long tmp;                     \
+    __asm__ __volatile__(                  \
+    "    .set push          \n"            \
+    "    .set noreorder     \n"            \
+    "    .set mips32        \n"            \
+    "    mfc0 %0, $16, 7    \n"            \
+    "    nop                \n"            \
+    "    ori  %0, 2         \n"            \
+    "    mtc0 %0, $16, 7    \n"            \
+    "    nop                \n"            \
+    "    .set pop           \n"            \
+    : "=&r"(tmp));                         \
+    } while (0)
+
+#define SYNC_WB() __asm__ __volatile__ ("sync":::"memory")
+#else /* !JZ4732 */
+#define INVALIDATE_BTB() do { } while(0)
+#define SYNC_WB() do { } while(0)
+#endif /* CONFIG_CPU */
+
+#define __CACHE_OP(op, addr)                         \
+    __asm__ __volatile__(                            \
+    "    .set push\n\t                  \n"          \
+    "    .set noreorder                 \n"          \
+    "    .set mips32\n\t                \n"          \
+    "    cache %0, %1                   \n"          \
+    "    .set pop                       \n"          \
+    :                                                \
+    : "i" (op), "m"(*(unsigned char *)(addr)))
 
-#define cache_op(base,op)                  \
-    __asm__ __volatile__("                 \
-        .set noreorder;                    \
-        .set mips3;                        \
-        cache %1, (%0);                    \
-        .set mips0;                        \
-        .set reorder"                      \
-        :                                  \
-        : "r" (base),                      \
-          "i" (op));
+/* rockbox cache api */
 
-void __icache_invalidate_all(void)
+/* Writeback whole D-cache
+ * Alias to commit_discard_dcache() as there is no index type
+ * variant of writeback-only operation
+ */
+void commit_dcache(void) __attribute__((alias("commit_discard_dcache")));
+
+/* Writeback whole D-cache and invalidate D-cache lines */
+void commit_discard_dcache(void)
 {
-    unsigned long start;
-    unsigned long end;
+    unsigned int i;
+
+    /* Use index type operation and iterate whole cache */
+    for (i=A_K0BASE; i<A_K0BASE+CACHE_SIZE; i+=CACHEALIGN_SIZE)
+        __CACHE_OP(DCIndexWBInv, i);
 
-    start = A_K0BASE;
-    end = start + CACHE_SIZE;
-    while(start < end)
-    {
-        cache_op(start,ICIndexInv);
-        start += CACHE_LINE_SIZE;
-    }
     SYNC_WB();
 }
 
-void __dcache_invalidate_all(void)
+/* Writeback lines of D-cache corresponding to address range and
+ * invalidate those D-cache lines
+ */
+void commit_discard_dcache_range(const void *base, unsigned int size)
 {
-    unsigned long start;
-    unsigned long end;
+    char *s;
+
+    for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
+        __CACHE_OP(DCHitWBInv, s);
 
-    start = A_K0BASE;
-    end = start + CACHE_SIZE;
-    while (start < end)
-    {
-        cache_op(start,DCIndexWBInv);
-        start += CACHE_LINE_SIZE;
-    }
     SYNC_WB();
 }
 
-void __idcache_invalidate_all(void)
+/* Writeback lines of D-cache corresponding to address range
+ */
+void commit_dcache_range(const void *base, unsigned int size)
 {
-    __dcache_invalidate_all();
-    __icache_invalidate_all();
+    char *s;
+
+    for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
+        __CACHE_OP(DCHitWB, s);
+
+    SYNC_WB();
 }
 
-void __dcache_writeback_all(void)
+/* Invalidate D-cache lines corresponding to address range
+ * WITHOUT writeback
+ */
+void discard_dcache_range(const void *base, unsigned int size)
 {
-    __dcache_invalidate_all();
+    char *s;
+
+    if (((int)base & CACHEALIGN_SIZE - 1) ||
+        (((int)base + size) & CACHEALIGN_SIZE - 1)) {
+        /* Overlapping sections, so we need to write back instead */
+        commit_discard_dcache_range(base, size);
+        return;
+    };
+
+    for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
+        __CACHE_OP(DCHitInv, s);
+
+    SYNC_WB();
 }
 
-void dma_cache_wback_inv(unsigned long addr, unsigned long size)
+/* Invalidate whole I-cache */
+static void discard_icache(void)
 {
-    unsigned long end, a;
+    unsigned int i;
 
-    if (size >= CACHE_SIZE*2) {
-        __dcache_writeback_all();
-    }
-    else {
-        unsigned long dc_lsize = CACHE_LINE_SIZE;
-
-        a = addr & ~(dc_lsize - 1);
-        end = (addr + size - 1) & ~(dc_lsize - 1);
-        while (1) {
-            cache_op(a,DCHitWBInv);
-            if (a == end)
-                break;
-            a += dc_lsize;
-        }
-    }
-    SYNC_WB();
+    asm volatile (".set push       \n"
+                  ".set noreorder  \n"
+                  ".set mips32     \n"
+                  "mtc0 $0, $28    \n" /* TagLo */
+                  "mtc0 $0, $29    \n" /* TagHi */
+                  ".set pop        \n"
+                  );
+    /* Use index type operation and iterate whole cache */
+    for (i=A_K0BASE; i<A_K0BASE+CACHE_SIZE; i+=CACHEALIGN_SIZE)
+        __CACHE_OP(ICIndexStTag, i);
+
+    INVALIDATE_BTB();
+}
+
+/* Invalidate the entire I-cache
+ * and writeback + invalidate the entire D-cache
+ */
+void commit_discard_idcache(void)
+{
+    commit_discard_dcache();
+    discard_icache();
 }
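A behavioural detail of the new discard_dcache_range() worth noting: if either end of the range is not aligned to CACHEALIGN_SIZE, it falls back to commit_discard_dcache_range(), because purely invalidating a partially covered cache line would also throw away unrelated data sharing that line. Callers that want the cheap DCHitInv path therefore keep their buffers line-aligned and line-sized. A small illustrative sketch, not from the patch (the buffer name and size are made up, and CACHEALIGN_SIZE is assumed to come from the target's system.h, as used in the code above):

    /* Hypothetical receive buffer: aligning and padding it to CACHEALIGN_SIZE
     * keeps discard_dcache_range() on the pure-invalidate fast path, since no
     * cache line is shared with unrelated data. */
    #include "system.h"

    static unsigned char rx_buf[512] __attribute__((aligned(CACHEALIGN_SIZE)));

    static void rx_buf_prepare(void)
    {
        /* Fully aligned range: pure invalidate, no writeback fallback. */
        discard_dcache_range(rx_buf, sizeof(rx_buf));
    }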