author     Aidan MacDonald <amachronic@protonmail.com>  2021-05-13 15:29:02 +0100
committer  Aidan MacDonald <amachronic@protonmail.com>  2021-05-14 10:30:41 +0000
commit     eb0336ededda8d64b0f5fd98853b85ea5aa46060 (patch)
tree       7570b0789ba8c2b6f85b2513b56a40d600af334c
parent     ff28d238b8a9dd5a5e457387378428ca8e3c8ffd (diff)
download   rockbox-eb0336ededda8d64b0f5fd98853b85ea5aa46060.tar.gz
           rockbox-eb0336ededda8d64b0f5fd98853b85ea5aa46060.zip
FAT: align writes when bounce buffering is enabled
Motivation: it turns out the DMA in the M3K's MSC controller is buggy and
can't handle unaligned addresses properly, despite the HW docs claiming
otherwise. Extending the FAT driver bounce buffering code is the easiest
way to work around the problem (but probably not the most efficient).

Change-Id: I1b59b0eb4bbc881d317ff10c64ecadb1f9041236
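The workaround keeps the caller's misaligned buffer away from the DMA engine by staging each chunk through an aligned bounce buffer. As a rough standalone sketch of that pattern (not the Rockbox code: write_sectors_aligned, sector_write, BOUNCE_SECTORS and the 4-byte alignment rule below are illustrative assumptions), the write side looks roughly like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE    512
#define BOUNCE_SECTORS 4    /* sectors per bounce-buffer chunk (illustrative) */

/* Aligned scratch buffer the (hypothetical) DMA engine can always accept. */
static uint8_t bounce_buf[BOUNCE_SECTORS * SECTOR_SIZE] __attribute__((aligned(4)));

/* Stand-in for the storage driver's write; assumes buf is DMA-safe. */
static int sector_write(unsigned long start, int count, const void *buf)
{
    printf("write %d sectors at %lu (%s buffer)\n", count, start,
           buf == bounce_buf ? "bounce" : "caller's");
    return 0;
}

/* Write count sectors from buf, bouncing through the aligned buffer
 * whenever the caller's pointer is not 4-byte aligned. */
static int write_sectors_aligned(unsigned long start, int count, const void *buf)
{
    const uint8_t *src = buf;
    int rc = 0;

    if (((uintptr_t)src & 3) == 0)          /* already aligned: go direct */
        return sector_write(start, count, src);

    while (count > 0) {
        int chunk = count < BOUNCE_SECTORS ? count : BOUNCE_SECTORS;

        memcpy(bounce_buf, src, chunk * SECTOR_SIZE);   /* realign the data */
        rc = sector_write(start, chunk, bounce_buf);
        if (rc < 0)
            break;

        src   += chunk * SECTOR_SIZE;
        start += chunk;
        count -= chunk;
    }

    return rc;
}

int main(void)
{
    static uint8_t data[3 * SECTOR_SIZE + 1];
    /* Deliberately misaligned source pointer to exercise the bounce path. */
    return write_sectors_aligned(0, 3, data + 1) < 0;
}

In the commit itself the equivalent chunked copy-then-transfer loop lives in fat.c's transfer() and now covers writes as well as reads, guarded by STORAGE_OVERLAP() and the STORAGE_NEEDS_BOUNCE_BUFFER define.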
-rw-r--r--   firmware/drivers/fat.c   57
1 file changed, 28 insertions(+), 29 deletions(-)
diff --git a/firmware/drivers/fat.c b/firmware/drivers/fat.c
index 50619983e9..337e29a1bc 100644
--- a/firmware/drivers/fat.c
+++ b/firmware/drivers/fat.c
@@ -2384,44 +2384,43 @@ static long transfer(struct bpb *fat_bpb, unsigned long start, long count,
             panicf("Write %ld after data\n",
                    start + count - fat_bpb->totalsectors);
         }
-        else
-        {
-            rc = storage_write_sectors(IF_MD(fat_bpb->drive,)
-                                       start + fat_bpb->startsector, count, buf);
-        }
     }
-    else
-    {
-        void* xferbuf = buf;
-#ifdef STORAGE_NEEDS_BOUNCE_BUFFER
-        int remain = count;
-        int xferred = 0;
-        int aligned = 1;
-        if(STORAGE_OVERLAP((uintptr_t)buf)) {
-            xferbuf = FAT_BOUNCE_BUFFER(fat_bpb);
-            aligned = 0;
-            count = MIN(remain, FAT_BOUNCE_SECTORS);
-        }

-        while(remain > 0) {
-#endif
-            rc = storage_read_sectors(IF_MD(fat_bpb->drive,)
-                                      start + fat_bpb->startsector, count, xferbuf);
 #ifdef STORAGE_NEEDS_BOUNCE_BUFFER
+    if(UNLIKELY(STORAGE_OVERLAP((uintptr_t)buf))) {
+        void* xfer_buf = FAT_BOUNCE_BUFFER(fat_bpb);
+        while(count > 0) {
+            int xfer_count = MIN(count, FAT_BOUNCE_SECTORS);
+
+            if(write) {
+                memcpy(xfer_buf, buf, xfer_count * SECTOR_SIZE);
+                rc = storage_write_sectors(IF_MD(fat_bpb->drive,)
+                                           start + fat_bpb->startsector, xfer_count, xfer_buf);
+            } else {
+                rc = storage_read_sectors(IF_MD(fat_bpb->drive,)
+                                          start + fat_bpb->startsector, xfer_count, xfer_buf);
+                memcpy(buf, xfer_buf, xfer_count * SECTOR_SIZE);
+            }
+
             if(rc < 0)
                 break;
-            if(LIKELY(aligned))
-                break;

-            memcpy(buf, xferbuf, count * SECTOR_SIZE);
-            buf += count * SECTOR_SIZE;
-            xferred += count;
-            start += count;
-            remain -= count;
-            count = MIN(remain, FAT_BOUNCE_SECTORS);
+            buf += xfer_count * SECTOR_SIZE;
+            start += xfer_count;
+            count -= xfer_count;
         }
+    } else {
 #endif
+        if(write) {
+            rc = storage_write_sectors(IF_MD(fat_bpb->drive,)
+                                       start + fat_bpb->startsector, count, buf);
+        } else {
+            rc = storage_read_sectors(IF_MD(fat_bpb->drive,)
+                                      start + fat_bpb->startsector, count, buf);
+        }
+#ifdef STORAGE_NEEDS_BOUNCE_BUFFER
     }
+#endif

     if (rc < 0)
     {