author    Aidan MacDonald <amachronic@protonmail.com>  2021-03-03 17:54:38 +0000
committer Solomon Peachy <pizza@shaftnet.org>  2021-03-03 20:50:28 +0000
commit    74a3d1f5be2d364a33f37e0ad621538df1bfba4b
tree      8989db6f499d53384645a7a6c6ee84933764f7fd
parent    f906df017dd7e82f8452cc479373a1b341a02bd9
Fix MIPS cache operations and enable HAVE_CPU_CACHE_ALIGN on MIPS
- The range-based cache operations on MIPS were broken and only worked
  properly when BOTH the address and size were multiples of the cache
  line size. If this was not the case, the last cache line of the range
  would not be touched! The fix is to align the start/end pointers to
  cache lines before iterating.

- To my knowledge all MIPS processors have a cache, so I enabled
  HAVE_CPU_CACHE_ALIGN by default. This also allows mmu-mips.c to use
  the CACHEALIGN_UP/DOWN macros.

- Make jz4760/system-target.h define its cache line size properly.

Change-Id: I1fcd04a59791daa233b9699f04d5ac1cc6bacee7
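For context, the alignment macros the fix relies on round a pointer down or up to a cache-line boundary. A minimal sketch of what they compute (illustrative only; the real definitions live in Rockbox's system headers, and CACHEALIGN_SIZE is supplied by the target, e.g. by jz4760/system-target.h):

    /* Illustrative sketch, not the exact Rockbox definitions.
     * Assumes CACHEALIGN_SIZE is a power of two (e.g. 32 bytes). */
    #include <stdint.h>

    #define CACHEALIGN_SIZE 32u

    #define CACHEALIGN_DOWN(p) \
        ((char *)((uintptr_t)(p) & ~(uintptr_t)(CACHEALIGN_SIZE - 1)))
    #define CACHEALIGN_UP(p) \
        CACHEALIGN_DOWN((uintptr_t)(p) + CACHEALIGN_SIZE - 1)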
Diffstat (limited to 'firmware/target/mips/mmu-mips.c')
-rw-r--r--  firmware/target/mips/mmu-mips.c | 39
1 file changed, 24 insertions, 15 deletions
diff --git a/firmware/target/mips/mmu-mips.c b/firmware/target/mips/mmu-mips.c
index eb7004952e..f4ffbfa6ee 100644
--- a/firmware/target/mips/mmu-mips.c
+++ b/firmware/target/mips/mmu-mips.c
@@ -192,10 +192,11 @@ void commit_discard_dcache(void)
  */
 void commit_discard_dcache_range(const void *base, unsigned int size)
 {
-    register char *s;
+    char *ptr = CACHEALIGN_DOWN((char*)base);
+    char *end = CACHEALIGN_UP((char*)base + size);
 
-    for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
-        __CACHE_OP(DCHitWBInv, s);
+    for(; ptr != end; ptr += CACHEALIGN_SIZE)
+        __CACHE_OP(DCHitWBInv, ptr);
 
     SYNC_WB();
 }
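To see the off-by-one the old loop suffered from, consider a worked example with made-up addresses, assuming a 32-byte cache line: base = 0x1010 and size = 0x20 span two cache lines (0x1000 and 0x1020), but the old loop issues only one operation.

    /* Hypothetical illustration of the old bug; the addresses and the
     * 32-byte line size are made up for the example. */
    char *base = (char *)0x1010;
    unsigned int size = 0x20;   /* range covers 0x1010..0x102F */

    /* Old loop: s = 0x1010 touches line 0x1000; the next step gives
     * s = 0x1030, which fails s < base + size, so line 0x1020
     * (covering 0x1020..0x102F) is never touched. */

    /* New loop: ptr = CACHEALIGN_DOWN(base) = 0x1000 and
     * end = CACHEALIGN_UP(base + size) = 0x1040, so the cache op is
     * applied to both lines, 0x1000 and 0x1020. */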
@@ -204,10 +205,11 @@ void commit_discard_dcache_range(const void *base, unsigned int size)
  */
 void commit_dcache_range(const void *base, unsigned int size)
 {
-    register char *s;
+    char *ptr = CACHEALIGN_DOWN((char*)base);
+    char *end = CACHEALIGN_UP((char*)base + size);
 
-    for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
-        __CACHE_OP(DCHitWB, s);
+    for(; ptr != end; ptr += CACHEALIGN_SIZE)
+        __CACHE_OP(DCHitWB, ptr);
 
     SYNC_WB();
 }
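For reference, __CACHE_OP wraps the MIPS `cache` instruction, which applies an operation code to the cache line containing a given address. A hedged sketch of such a macro (the actual Rockbox definition and the DCHitWB/DCHitWBInv/DCHitInv opcode values live in the MIPS target headers and may differ in detail):

    /* Sketch only: one common way to issue a MIPS cache op on the
     * line containing addr, via GCC inline assembly. */
    #define __CACHE_OP(op, addr)                  \
        __asm__ __volatile__("cache %0, 0(%1)"    \
                             :                    \
                             : "i" (op), "r" (addr))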
@@ -217,17 +219,24 @@ void commit_dcache_range(const void *base, unsigned int size)
  */
 void discard_dcache_range(const void *base, unsigned int size)
 {
-    register char *s;
+    char *ptr = CACHEALIGN_DOWN((char*)base);
+    char *end = CACHEALIGN_UP((char*)base + size);
 
-    if (((int)base & CACHEALIGN_SIZE - 1) ||
-        (((int)base + size) & CACHEALIGN_SIZE - 1)) {
-        /* Overlapping sections, so we need to write back instead */
-        commit_discard_dcache_range(base, size);
-        return;
-    };
+    if(ptr != base) {
+        /* Start of region not cache aligned */
+        __CACHE_OP(DCHitWBInv, ptr);
+        ptr += CACHEALIGN_SIZE;
+    }
+
+    if(base+size != end) {
+        /* End of region not cache aligned */
+        end -= CACHEALIGN_SIZE;
+        __CACHE_OP(DCHitWBInv, end);
+    }
 
-    for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
-        __CACHE_OP(DCHitInv, s);
+    /* Interior of region is safe to discard */
+    for(; ptr != end; ptr += CACHEALIGN_SIZE)
+        __CACHE_OP(DCHitInv, ptr);
 
     SYNC_WB();
 }
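As a usage note (hypothetical driver code, not part of this commit): commit_dcache_range writes dirty lines back before a device reads a buffer, discard_dcache_range drops stale lines after a device writes one, and commit_discard_dcache_range does both. With this fix, callers no longer have to guarantee cache-line alignment themselves, and the partial lines at the edges of a discarded region are written back and invalidated rather than silently skipped.

    /* Hypothetical DMA pattern; dma_write_to_device and
     * dma_read_from_device are illustrative names, not Rockbox APIs. */
    static char buf[300];   /* deliberately not a line-size multiple */

    void send_to_device(void)
    {
        commit_dcache_range(buf, sizeof(buf));  /* flush CPU writes */
        /* dma_write_to_device(buf, sizeof(buf)); */
    }

    void receive_from_device(void)
    {
        /* dma_read_from_device(buf, sizeof(buf)); */
        discard_dcache_range(buf, sizeof(buf)); /* drop stale lines */
    }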