diff options
author | Jörg Hohensohn <hohensoh@rockbox.org> | 2005-01-04 23:01:25 +0000 |
---|---|---|
committer | Jörg Hohensohn <hohensoh@rockbox.org> | 2005-01-04 23:01:25 +0000 |
commit | 50dba1fad0a3d83c3ee803b398b4969109bd70c1 (patch) | |
tree | 8e3e8b5f25ee86a4c3bb1f4776db50705b996184 /firmware | |
parent | 8be9775d532d1a62f6fde629a73447bfd75561c6 (diff) | |
download | rockbox-50dba1fad0a3d83c3ee803b398b4969109bd70c1.tar.gz rockbox-50dba1fad0a3d83c3ee803b398b4969109bd70c1.zip |
Make modification of FAT cache entries atomic; the previous code was potentially unsafe.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@5534 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware')
-rw-r--r-- | firmware/drivers/fat.c | 36 |
1 file changed, 24 insertions, 12 deletions
diff --git a/firmware/drivers/fat.c b/firmware/drivers/fat.c index 81e12fd0b4..b3f6891800 100644 --- a/firmware/drivers/fat.c +++ b/firmware/drivers/fat.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "panic.h" | 27 | #include "panic.h" |
28 | #include "system.h" | 28 | #include "system.h" |
29 | #include "timefuncs.h" | 29 | #include "timefuncs.h" |
30 | #include "kernel.h" | ||
30 | 31 | ||
31 | #define BYTES2INT16(array,pos) \ | 32 | #define BYTES2INT16(array,pos) \ |
32 | (array[pos] | (array[pos+1] << 8 )) | 33 | (array[pos] | (array[pos+1] << 8 )) |
@@ -230,7 +231,7 @@ static struct bpb fat_bpbs[NUM_VOLUMES]; /* mounted partition info */ | |||
230 | 231 | ||
231 | static int update_fsinfo(IF_MV_NONVOID(struct bpb* fat_bpb)); | 232 | static int update_fsinfo(IF_MV_NONVOID(struct bpb* fat_bpb)); |
232 | static int bpb_is_sane(IF_MV_NONVOID(struct bpb* fat_bpb)); | 233 | static int bpb_is_sane(IF_MV_NONVOID(struct bpb* fat_bpb)); |
233 | static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) int secnum); | 234 | static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) int secnum, bool dirty); |
234 | static int create_dos_name(const unsigned char *name, unsigned char *newname); | 235 | static int create_dos_name(const unsigned char *name, unsigned char *newname); |
235 | static unsigned int find_free_cluster(IF_MV2(struct bpb* fat_bpb,) unsigned int start); | 236 | static unsigned int find_free_cluster(IF_MV2(struct bpb* fat_bpb,) unsigned int start); |
236 | static int transfer(IF_MV2(struct bpb* fat_bpb,) unsigned int start, int count, char* buf, bool write ); | 237 | static int transfer(IF_MV2(struct bpb* fat_bpb,) unsigned int start, int count, char* buf, bool write ); |
@@ -250,6 +251,7 @@ struct fat_cache_entry | |||
250 | 251 | ||
251 | static char fat_cache_sectors[FAT_CACHE_SIZE][SECTOR_SIZE]; | 252 | static char fat_cache_sectors[FAT_CACHE_SIZE][SECTOR_SIZE]; |
252 | static struct fat_cache_entry fat_cache[FAT_CACHE_SIZE]; | 253 | static struct fat_cache_entry fat_cache[FAT_CACHE_SIZE]; |
254 | static struct mutex cache_mutex; | ||
253 | 255 | ||
254 | static int cluster2sec(IF_MV2(struct bpb* fat_bpb,) int cluster) | 256 | static int cluster2sec(IF_MV2(struct bpb* fat_bpb,) int cluster) |
255 | { | 257 | { |
@@ -299,6 +301,9 @@ void fat_size(IF_MV2(int volume,) unsigned int* size, unsigned int* free) | |||
299 | void fat_init(void) | 301 | void fat_init(void) |
300 | { | 302 | { |
301 | unsigned int i; | 303 | unsigned int i; |
304 | |||
305 | mutex_init(&cache_mutex); | ||
306 | |||
302 | /* mark the FAT cache as unused */ | 307 | /* mark the FAT cache as unused */ |
303 | for(i = 0;i < FAT_CACHE_SIZE;i++) | 308 | for(i = 0;i < FAT_CACHE_SIZE;i++) |
304 | { | 309 | { |
@@ -488,7 +493,7 @@ void fat_recalc_free(IF_MV_NONVOID(int volume)) | |||
488 | { | 493 | { |
489 | for (i = 0; i<fat_bpb->fatsize; i++) { | 494 | for (i = 0; i<fat_bpb->fatsize; i++) { |
490 | unsigned int j; | 495 | unsigned int j; |
491 | unsigned short* fat = cache_fat_sector(IF_MV2(fat_bpb,) i); | 496 | unsigned short* fat = cache_fat_sector(IF_MV2(fat_bpb,) i, false); |
492 | for (j = 0; j < CLUSTERS_PER_FAT16_SECTOR; j++) { | 497 | for (j = 0; j < CLUSTERS_PER_FAT16_SECTOR; j++) { |
493 | unsigned int c = i * CLUSTERS_PER_FAT16_SECTOR + j; | 498 | unsigned int c = i * CLUSTERS_PER_FAT16_SECTOR + j; |
494 | if ( c > fat_bpb->dataclusters+1 ) /* nr 0 is unused */ | 499 | if ( c > fat_bpb->dataclusters+1 ) /* nr 0 is unused */ |
@@ -507,7 +512,7 @@ void fat_recalc_free(IF_MV_NONVOID(int volume)) | |||
507 | { | 512 | { |
508 | for (i = 0; i<fat_bpb->fatsize; i++) { | 513 | for (i = 0; i<fat_bpb->fatsize; i++) { |
509 | unsigned int j; | 514 | unsigned int j; |
510 | unsigned int* fat = cache_fat_sector(IF_MV2(fat_bpb,) i); | 515 | unsigned int* fat = cache_fat_sector(IF_MV2(fat_bpb,) i, false); |
511 | for (j = 0; j < CLUSTERS_PER_FAT_SECTOR; j++) { | 516 | for (j = 0; j < CLUSTERS_PER_FAT_SECTOR; j++) { |
512 | unsigned int c = i * CLUSTERS_PER_FAT_SECTOR + j; | 517 | unsigned int c = i * CLUSTERS_PER_FAT_SECTOR + j; |
513 | if ( c > fat_bpb->dataclusters+1 ) /* nr 0 is unused */ | 518 | if ( c > fat_bpb->dataclusters+1 ) /* nr 0 is unused */ |
@@ -621,7 +626,10 @@ static void flush_fat_sector(struct fat_cache_entry *fce, | |||
621 | fce->dirty = false; | 626 | fce->dirty = false; |
622 | } | 627 | } |
623 | 628 | ||
624 | static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) int fatsector) | 629 | /* Note: The returned pointer is only safely valid until the next |
630 | task switch! (Any subsequent ata read/write may yield.) */ | ||
631 | static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) | ||
632 | int fatsector, bool dirty) | ||
625 | { | 633 | { |
626 | #ifndef HAVE_MULTIVOLUME | 634 | #ifndef HAVE_MULTIVOLUME |
627 | struct bpb* fat_bpb = &fat_bpbs[0]; | 635 | struct bpb* fat_bpb = &fat_bpbs[0]; |
@@ -632,6 +640,8 @@ static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) int fatsector) | |||
632 | unsigned char *sectorbuf = &fat_cache_sectors[cache_index][0]; | 640 | unsigned char *sectorbuf = &fat_cache_sectors[cache_index][0]; |
633 | int rc; | 641 | int rc; |
634 | 642 | ||
643 | mutex_lock(&cache_mutex); /* make changes atomic */ | ||
644 | |||
635 | /* Delete the cache entry if it isn't the sector we want */ | 645 | /* Delete the cache entry if it isn't the sector we want */ |
636 | if(fce->inuse && (fce->secnum != secnum | 646 | if(fce->inuse && (fce->secnum != secnum |
637 | #ifdef HAVE_MULTIVOLUME | 647 | #ifdef HAVE_MULTIVOLUME |
@@ -657,6 +667,7 @@ static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) int fatsector) | |||
657 | { | 667 | { |
658 | DEBUGF( "cache_fat_sector() - Could not read sector %d" | 668 | DEBUGF( "cache_fat_sector() - Could not read sector %d" |
659 | " (error %d)\n", secnum, rc); | 669 | " (error %d)\n", secnum, rc); |
670 | mutex_unlock(&cache_mutex); | ||
660 | return NULL; | 671 | return NULL; |
661 | } | 672 | } |
662 | fce->inuse = true; | 673 | fce->inuse = true; |
@@ -665,6 +676,9 @@ static void *cache_fat_sector(IF_MV2(struct bpb* fat_bpb,) int fatsector) | |||
665 | fce->fat_vol = fat_bpb; | 676 | fce->fat_vol = fat_bpb; |
666 | #endif | 677 | #endif |
667 | } | 678 | } |
679 | if (dirty) | ||
680 | fce->dirty = true; /* dirt remains, sticky until flushed */ | ||
681 | mutex_unlock(&cache_mutex); | ||
668 | return sectorbuf; | 682 | return sectorbuf; |
669 | } | 683 | } |
670 | 684 | ||
@@ -686,7 +700,7 @@ static unsigned int find_free_cluster(IF_MV2(struct bpb* fat_bpb,) unsigned int | |||
686 | for (i = 0; i<fat_bpb->fatsize; i++) { | 700 | for (i = 0; i<fat_bpb->fatsize; i++) { |
687 | unsigned int j; | 701 | unsigned int j; |
688 | unsigned int nr = (i + sector) % fat_bpb->fatsize; | 702 | unsigned int nr = (i + sector) % fat_bpb->fatsize; |
689 | unsigned short* fat = cache_fat_sector(IF_MV2(fat_bpb,) nr); | 703 | unsigned short* fat = cache_fat_sector(IF_MV2(fat_bpb,) nr, false); |
690 | if ( !fat ) | 704 | if ( !fat ) |
691 | break; | 705 | break; |
692 | for (j = 0; j < CLUSTERS_PER_FAT16_SECTOR; j++) { | 706 | for (j = 0; j < CLUSTERS_PER_FAT16_SECTOR; j++) { |
@@ -714,7 +728,7 @@ static unsigned int find_free_cluster(IF_MV2(struct bpb* fat_bpb,) unsigned int | |||
714 | for (i = 0; i<fat_bpb->fatsize; i++) { | 728 | for (i = 0; i<fat_bpb->fatsize; i++) { |
715 | unsigned int j; | 729 | unsigned int j; |
716 | unsigned int nr = (i + sector) % fat_bpb->fatsize; | 730 | unsigned int nr = (i + sector) % fat_bpb->fatsize; |
717 | unsigned int* fat = cache_fat_sector(IF_MV2(fat_bpb,) nr); | 731 | unsigned int* fat = cache_fat_sector(IF_MV2(fat_bpb,) nr, false); |
718 | if ( !fat ) | 732 | if ( !fat ) |
719 | break; | 733 | break; |
720 | for (j = 0; j < CLUSTERS_PER_FAT_SECTOR; j++) { | 734 | for (j = 0; j < CLUSTERS_PER_FAT_SECTOR; j++) { |
@@ -760,13 +774,12 @@ static int update_fat_entry(IF_MV2(struct bpb* fat_bpb,) unsigned int entry, uns | |||
760 | if ( entry < 2 ) | 774 | if ( entry < 2 ) |
761 | panicf("Updating reserved FAT entry %d.\n",entry); | 775 | panicf("Updating reserved FAT entry %d.\n",entry); |
762 | 776 | ||
763 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector); | 777 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector, true); |
764 | if (!sec) | 778 | if (!sec) |
765 | { | 779 | { |
766 | DEBUGF( "update_fat_entry() - Could not cache sector %d\n", sector); | 780 | DEBUGF( "update_fat_entry() - Could not cache sector %d\n", sector); |
767 | return -1; | 781 | return -1; |
768 | } | 782 | } |
769 | fat_cache[(sector + fat_bpb->bpb_rsvdseccnt) & FAT_CACHE_MASK].dirty = true; | ||
770 | 783 | ||
771 | if ( val ) { | 784 | if ( val ) { |
772 | if (SWAB16(sec[offset]) == 0x0000 && fat_bpb->fsinfo.freecount > 0) | 785 | if (SWAB16(sec[offset]) == 0x0000 && fat_bpb->fsinfo.freecount > 0) |
@@ -796,13 +809,12 @@ static int update_fat_entry(IF_MV2(struct bpb* fat_bpb,) unsigned int entry, uns | |||
796 | if ( entry < 2 ) | 809 | if ( entry < 2 ) |
797 | panicf("Updating reserved FAT entry %d.\n",entry); | 810 | panicf("Updating reserved FAT entry %d.\n",entry); |
798 | 811 | ||
799 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector); | 812 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector, true); |
800 | if (!sec) | 813 | if (!sec) |
801 | { | 814 | { |
802 | DEBUGF( "update_fat_entry() - Could not cache sector %d\n", sector); | 815 | DEBUGF( "update_fat_entry() - Could not cache sector %d\n", sector); |
803 | return -1; | 816 | return -1; |
804 | } | 817 | } |
805 | fat_cache[(sector + fat_bpb->bpb_rsvdseccnt) & FAT_CACHE_MASK].dirty = true; | ||
806 | 818 | ||
807 | if ( val ) { | 819 | if ( val ) { |
808 | if (!(SWAB32(sec[offset]) & 0x0fffffff) && | 820 | if (!(SWAB32(sec[offset]) & 0x0fffffff) && |
@@ -836,7 +848,7 @@ static int read_fat_entry(IF_MV2(struct bpb* fat_bpb,) unsigned int entry) | |||
836 | int offset = entry % CLUSTERS_PER_FAT16_SECTOR; | 848 | int offset = entry % CLUSTERS_PER_FAT16_SECTOR; |
837 | unsigned short* sec; | 849 | unsigned short* sec; |
838 | 850 | ||
839 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector); | 851 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector, false); |
840 | if (!sec) | 852 | if (!sec) |
841 | { | 853 | { |
842 | DEBUGF( "read_fat_entry() - Could not cache sector %d\n", sector); | 854 | DEBUGF( "read_fat_entry() - Could not cache sector %d\n", sector); |
@@ -852,7 +864,7 @@ static int read_fat_entry(IF_MV2(struct bpb* fat_bpb,) unsigned int entry) | |||
852 | int offset = entry % CLUSTERS_PER_FAT_SECTOR; | 864 | int offset = entry % CLUSTERS_PER_FAT_SECTOR; |
853 | unsigned int* sec; | 865 | unsigned int* sec; |
854 | 866 | ||
855 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector); | 867 | sec = cache_fat_sector(IF_MV2(fat_bpb,) sector, false); |
856 | if (!sec) | 868 | if (!sec) |
857 | { | 869 | { |
858 | DEBUGF( "read_fat_entry() - Could not cache sector %d\n", sector); | 870 | DEBUGF( "read_fat_entry() - Could not cache sector %d\n", sector); |