diff options
author | Frank Gevaerts <frank@gevaerts.be> | 2009-03-29 19:56:21 +0000 |
---|---|---|
committer | Frank Gevaerts <frank@gevaerts.be> | 2009-03-29 19:56:21 +0000 |
commit | 0ece30a72612bbb8267390bce93076a398c8095f (patch) | |
tree | 2b64d817fed5f37ea4f8c53658d263e6a67bedd8 /firmware | |
parent | d1af8f879a5ee354bd6360b2669674dfce7cdb37 (diff) | |
download | rockbox-0ece30a72612bbb8267390bce93076a398c8095f.tar.gz rockbox-0ece30a72612bbb8267390bce93076a398c8095f.zip |
Commit FS#10015 - Use chained transfer descriptor to speed up USB transfers on PP and iMX31
(not exactly the same. This one actually works)
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@20570 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware')
-rw-r--r-- | firmware/target/arm/usb-drv-arc.c | 53 | ||||
-rw-r--r-- | firmware/usbstack/usb_storage.c | 9 |
2 files changed, 36 insertions, 26 deletions
diff --git a/firmware/target/arm/usb-drv-arc.c b/firmware/target/arm/usb-drv-arc.c index 7fd00afe25..8e4eab61ca 100644 --- a/firmware/target/arm/usb-drv-arc.c +++ b/firmware/target/arm/usb-drv-arc.c | |||
@@ -308,6 +308,10 @@ | |||
308 | #define DTD_RESERVED_PIPE_OFFSET 20 | 308 | #define DTD_RESERVED_PIPE_OFFSET 20 |
309 | /*-------------------------------------------------------------------------*/ | 309 | /*-------------------------------------------------------------------------*/ |
310 | 310 | ||
311 | /* 4 transfer descriptors per endpoint allow 64k transfers, which is the usual MSC | ||
312 | transfer size, so it seems like a good choice */ | ||
313 | #define NUM_TDS_PER_EP 4 | ||
314 | |||
311 | /* manual: 32.13.2 Endpoint Transfer Descriptor (dTD) */ | 315 | /* manual: 32.13.2 Endpoint Transfer Descriptor (dTD) */ |
312 | struct transfer_descriptor { | 316 | struct transfer_descriptor { |
313 | unsigned int next_td_ptr; /* Next TD pointer(31-5), T(0) set | 317 | unsigned int next_td_ptr; /* Next TD pointer(31-5), T(0) set |
@@ -322,7 +326,7 @@ struct transfer_descriptor { | |||
322 | unsigned int reserved; | 326 | unsigned int reserved; |
323 | } __attribute__ ((packed)); | 327 | } __attribute__ ((packed)); |
324 | 328 | ||
325 | static struct transfer_descriptor td_array[USB_NUM_ENDPOINTS*2] | 329 | static struct transfer_descriptor td_array[USB_NUM_ENDPOINTS*2*NUM_TDS_PER_EP] |
326 | USB_DEVBSS_ATTR __attribute__((aligned(32))); | 330 | USB_DEVBSS_ATTR __attribute__((aligned(32))); |
327 | 331 | ||
328 | /* manual: 32.13.1 Endpoint Queue Head (dQH) */ | 332 | /* manual: 32.13.1 Endpoint Queue Head (dQH) */ |
@@ -665,7 +669,7 @@ static int prime_transfer(int endpoint, void* ptr, int len, bool send, bool wait | |||
665 | unsigned int mask = pipe2mask[pipe]; | 669 | unsigned int mask = pipe2mask[pipe]; |
666 | struct queue_head* qh = &qh_array[pipe]; | 670 | struct queue_head* qh = &qh_array[pipe]; |
667 | static long last_tick; | 671 | static long last_tick; |
668 | struct transfer_descriptor* new_td; | 672 | struct transfer_descriptor* new_td,*cur_td,*prev_td; |
669 | 673 | ||
670 | int oldlevel = disable_irq_save(); | 674 | int oldlevel = disable_irq_save(); |
671 | /* | 675 | /* |
@@ -674,11 +678,23 @@ static int prime_transfer(int endpoint, void* ptr, int len, bool send, bool wait | |||
674 | } | 678 | } |
675 | */ | 679 | */ |
676 | qh->status = 0; | 680 | qh->status = 0; |
677 | qh->length = 0; | ||
678 | qh->wait = wait; | 681 | qh->wait = wait; |
679 | 682 | ||
680 | new_td=&td_array[pipe]; | 683 | new_td=&td_array[pipe*NUM_TDS_PER_EP]; |
681 | prepare_td(new_td, 0, ptr, len,pipe); | 684 | cur_td=new_td; |
685 | prev_td=0; | ||
686 | int tdlen; | ||
687 | |||
688 | do | ||
689 | { | ||
690 | tdlen=MIN(len,16384); | ||
691 | prepare_td(cur_td, prev_td, ptr, tdlen,pipe); | ||
692 | ptr+=tdlen; | ||
693 | prev_td=cur_td; | ||
694 | cur_td++; | ||
695 | len-=tdlen; | ||
696 | } | ||
697 | while(len>0 ); | ||
682 | //logf("starting ep %d %s",endpoint,send?"send":"receive"); | 698 | //logf("starting ep %d %s",endpoint,send?"send":"receive"); |
683 | 699 | ||
684 | qh->dtd.next_td_ptr = (unsigned int)new_td; | 700 | qh->dtd.next_td_ptr = (unsigned int)new_td; |
@@ -807,6 +823,7 @@ static void prepare_td(struct transfer_descriptor* td, | |||
807 | 823 | ||
808 | if (previous_td != 0) { | 824 | if (previous_td != 0) { |
809 | previous_td->next_td_ptr=(unsigned int)td; | 825 | previous_td->next_td_ptr=(unsigned int)td; |
826 | previous_td->size_ioc_sts&=~DTD_IOC; | ||
810 | } | 827 | } |
811 | } | 828 | } |
812 | 829 | ||
@@ -845,27 +862,19 @@ static void transfer_completed(void) | |||
845 | int pipe = ep * 2 + dir; | 862 | int pipe = ep * 2 + dir; |
846 | if (mask & pipe2mask[pipe]) { | 863 | if (mask & pipe2mask[pipe]) { |
847 | struct queue_head* qh = &qh_array[pipe]; | 864 | struct queue_head* qh = &qh_array[pipe]; |
848 | struct transfer_descriptor *td = &td_array[pipe]; | ||
849 | |||
850 | if(td->size_ioc_sts & DTD_STATUS_ACTIVE) { | ||
851 | /* TODO this shouldn't happen, but...*/ | ||
852 | break; | ||
853 | } | ||
854 | if((td->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS != 0 && dir==0) { | ||
855 | /* We got less data than we asked for. */ | ||
856 | } | ||
857 | qh->length = (td->reserved & DTD_RESERVED_LENGTH_MASK) - | ||
858 | ((td->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS); | ||
859 | if(td->size_ioc_sts & DTD_ERROR_MASK) { | ||
860 | logf("pipe %d err %x", pipe, td->size_ioc_sts & DTD_ERROR_MASK); | ||
861 | qh->status |= td->size_ioc_sts & DTD_ERROR_MASK; | ||
862 | /* TODO we need to handle this somehow. Flush the endpoint ? */ | ||
863 | } | ||
864 | if(qh->wait) { | 865 | if(qh->wait) { |
865 | qh->wait=0; | 866 | qh->wait=0; |
866 | wakeup_signal(&transfer_completion_signal[pipe]); | 867 | wakeup_signal(&transfer_completion_signal[pipe]); |
867 | } | 868 | } |
868 | usb_core_transfer_complete(ep, dir?USB_DIR_IN:USB_DIR_OUT, qh->status, qh->length); | 869 | int length=0; |
870 | struct transfer_descriptor* td=&td_array[pipe*NUM_TDS_PER_EP]; | ||
871 | while(td!=DTD_NEXT_TERMINATE && td!=0) | ||
872 | { | ||
873 | length += ((td->reserved & DTD_RESERVED_LENGTH_MASK) - | ||
874 | ((td->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS)); | ||
875 | td=(struct transfer_descriptor*) td->next_td_ptr; | ||
876 | } | ||
877 | usb_core_transfer_complete(ep, dir?USB_DIR_IN:USB_DIR_OUT, qh->status, length); | ||
869 | } | 878 | } |
870 | } | 879 | } |
871 | } | 880 | } |
diff --git a/firmware/usbstack/usb_storage.c b/firmware/usbstack/usb_storage.c index ca9f150164..195a7c3661 100644 --- a/firmware/usbstack/usb_storage.c +++ b/firmware/usbstack/usb_storage.c | |||
@@ -46,11 +46,12 @@ | |||
46 | 46 | ||
47 | #define SECTOR_SIZE 512 | 47 | #define SECTOR_SIZE 512 |
48 | 48 | ||
49 | /* We can currently use up to 20k buffer size. More than that requires | 49 | /* the ARC driver currently supports up to 64k USB transfers. This is |
50 | * transfer chaining in the driver. Tests on sansa c200 show that the 16k | 50 | * enough for efficient mass storage support, as host OSes commonly |
51 | * limitation causes no more than 2% slowdown. | 51 | * don't do larger SCSI transfers anyway, so larger USB transfers |
52 | * wouldn't buy us anything. | ||
52 | */ | 53 | */ |
53 | #define BUFFER_SIZE 16384 | 54 | #define BUFFER_SIZE 65536 |
54 | 55 | ||
55 | /* bulk-only class specific requests */ | 56 | /* bulk-only class specific requests */ |
56 | #define USB_BULK_RESET_REQUEST 0xff | 57 | #define USB_BULK_RESET_REQUEST 0xff |