Commit FS#10015 - Use chained transfer descriptor to speed up USB transfers on PP and iMX31

(not exactly the same. This one actually works)



git-svn-id: svn://svn.rockbox.org/rockbox/trunk@20570 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Frank Gevaerts 2009-03-29 19:56:21 +00:00
parent d1af8f879a
commit 0ece30a726
2 changed files with 36 additions and 26 deletions

View file

@@ -308,6 +308,10 @@
#define DTD_RESERVED_PIPE_OFFSET 20 #define DTD_RESERVED_PIPE_OFFSET 20
/*-------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------*/
/* 4 transfer descriptors per endpoint allow 64k transfers, which is the usual MSC
transfer size, so that seems like a good choice */
#define NUM_TDS_PER_EP 4
/* manual: 32.13.2 Endpoint Transfer Descriptor (dTD) */ /* manual: 32.13.2 Endpoint Transfer Descriptor (dTD) */
struct transfer_descriptor { struct transfer_descriptor {
unsigned int next_td_ptr; /* Next TD pointer(31-5), T(0) set unsigned int next_td_ptr; /* Next TD pointer(31-5), T(0) set
@@ -322,7 +326,7 @@ struct transfer_descriptor {
unsigned int reserved; unsigned int reserved;
} __attribute__ ((packed)); } __attribute__ ((packed));
static struct transfer_descriptor td_array[USB_NUM_ENDPOINTS*2] static struct transfer_descriptor td_array[USB_NUM_ENDPOINTS*2*NUM_TDS_PER_EP]
USB_DEVBSS_ATTR __attribute__((aligned(32))); USB_DEVBSS_ATTR __attribute__((aligned(32)));
/* manual: 32.13.1 Endpoint Queue Head (dQH) */ /* manual: 32.13.1 Endpoint Queue Head (dQH) */
@@ -665,7 +669,7 @@ static int prime_transfer(int endpoint, void* ptr, int len, bool send, bool wait
unsigned int mask = pipe2mask[pipe]; unsigned int mask = pipe2mask[pipe];
struct queue_head* qh = &qh_array[pipe]; struct queue_head* qh = &qh_array[pipe];
static long last_tick; static long last_tick;
struct transfer_descriptor* new_td; struct transfer_descriptor* new_td,*cur_td,*prev_td;
int oldlevel = disable_irq_save(); int oldlevel = disable_irq_save();
/* /*
@@ -674,11 +678,23 @@ static int prime_transfer(int endpoint, void* ptr, int len, bool send, bool wait
} }
*/ */
qh->status = 0; qh->status = 0;
qh->length = 0;
qh->wait = wait; qh->wait = wait;
new_td=&td_array[pipe]; new_td=&td_array[pipe*NUM_TDS_PER_EP];
prepare_td(new_td, 0, ptr, len,pipe); cur_td=new_td;
prev_td=0;
int tdlen;
do
{
tdlen=MIN(len,16384);
prepare_td(cur_td, prev_td, ptr, tdlen,pipe);
ptr+=tdlen;
prev_td=cur_td;
cur_td++;
len-=tdlen;
}
while(len>0 );
//logf("starting ep %d %s",endpoint,send?"send":"receive"); //logf("starting ep %d %s",endpoint,send?"send":"receive");
qh->dtd.next_td_ptr = (unsigned int)new_td; qh->dtd.next_td_ptr = (unsigned int)new_td;
@@ -807,6 +823,7 @@ static void prepare_td(struct transfer_descriptor* td,
if (previous_td != 0) { if (previous_td != 0) {
previous_td->next_td_ptr=(unsigned int)td; previous_td->next_td_ptr=(unsigned int)td;
previous_td->size_ioc_sts&=~DTD_IOC;
} }
} }
@@ -845,27 +862,19 @@ static void transfer_completed(void)
int pipe = ep * 2 + dir; int pipe = ep * 2 + dir;
if (mask & pipe2mask[pipe]) { if (mask & pipe2mask[pipe]) {
struct queue_head* qh = &qh_array[pipe]; struct queue_head* qh = &qh_array[pipe];
struct transfer_descriptor *td = &td_array[pipe];
if(td->size_ioc_sts & DTD_STATUS_ACTIVE) {
/* TODO this shouldn't happen, but...*/
break;
}
if((td->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS != 0 && dir==0) {
/* We got less data than we asked for. */
}
qh->length = (td->reserved & DTD_RESERVED_LENGTH_MASK) -
((td->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS);
if(td->size_ioc_sts & DTD_ERROR_MASK) {
logf("pipe %d err %x", pipe, td->size_ioc_sts & DTD_ERROR_MASK);
qh->status |= td->size_ioc_sts & DTD_ERROR_MASK;
/* TODO we need to handle this somehow. Flush the endpoint ? */
}
if(qh->wait) { if(qh->wait) {
qh->wait=0; qh->wait=0;
wakeup_signal(&transfer_completion_signal[pipe]); wakeup_signal(&transfer_completion_signal[pipe]);
} }
usb_core_transfer_complete(ep, dir?USB_DIR_IN:USB_DIR_OUT, qh->status, qh->length); int length=0;
struct transfer_descriptor* td=&td_array[pipe*NUM_TDS_PER_EP];
while(td!=DTD_NEXT_TERMINATE && td!=0)
{
length += ((td->reserved & DTD_RESERVED_LENGTH_MASK) -
((td->size_ioc_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS));
td=(struct transfer_descriptor*) td->next_td_ptr;
}
usb_core_transfer_complete(ep, dir?USB_DIR_IN:USB_DIR_OUT, qh->status, length);
} }
} }
} }

View file

@@ -46,11 +46,12 @@
#define SECTOR_SIZE 512 #define SECTOR_SIZE 512
/* We can currently use up to 20k buffer size. More than that requires /* the ARC driver currently supports up to 64k USB transfers. This is
 * transfer chaining in the driver. Tests on sansa c200 show that the 16k * enough for efficient mass storage support, as host OSes commonly
* limitation causes no more than 2% slowdown. * don't do larger SCSI transfers anyway, so larger USB transfers
* wouldn't buy us anything.
*/ */
#define BUFFER_SIZE 16384 #define BUFFER_SIZE 65536
/* bulk-only class specific requests */ /* bulk-only class specific requests */
#define USB_BULK_RESET_REQUEST 0xff #define USB_BULK_RESET_REQUEST 0xff