
Move USB boosting around a bit, so that the CPU stays boosted for the entire duration of actual storage use. On some setups, boosting only for the individual transfers doesn't seem to work.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16726 a1c6a512-1295-4272-9138-f99709370657
Frank Gevaerts 2008-03-20 22:05:11 +00:00
parent 7e1a22e5be
commit ea5903dbf1
3 changed files with 5 additions and 4 deletions
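
For readers skimming the diff: the net effect is that the trigger_cpu_boost()/cancel_cpu_boost() pair moves outward, from around each individual SCSI transfer (and the driver init path) to around the whole exclusive-disk window held for the USB storage session. Below is a minimal sketch of the new placement, simplified rather than the actual Rockbox sources (the bodies and comments are illustrative only; trigger_cpu_boost() and cancel_cpu_boost() are the existing Rockbox calls also visible in the diff):

    /* After this commit: boost for the whole exclusive-storage window
     * (sketch only, not the real firmware/usb.c). */
    void usb_request_exclusive_ata(void)
    {
        trigger_cpu_boost();   /* CPU stays boosted from here on */
        /* ... post USB_REQUEST_DISK to the usb thread ... */
    }

    void usb_release_exclusive_ata(void)
    {
        cancel_cpu_boost();    /* unboost only when the disk is handed back */
        /* ... post USB_RELEASE_DISK to the usb thread ... */
    }

    /* Before this commit the pair instead bracketed each transfer:
     *   handle_scsi():  trigger_cpu_boost(); ata_read_sectors(...);
     *   send_csw():     cancel_cpu_boost();
     * which on some setups left the CPU unboosted at the wrong moments. */

Holding the boost across the whole session trades a little power for not having to re-boost on every transfer, which matches the intent stated in the commit message.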


@@ -353,7 +353,6 @@ bool usb_drv_powered(void)
 /* manual: 32.14.1 Device Controller Initialization */
 void usb_drv_init(void)
 {
-    trigger_cpu_boost();
     REG_USBCMD &= ~USBCMD_RUN;
     udelay(50000);
     REG_USBCMD |= USBCMD_CTRL_RESET;


@@ -529,6 +529,10 @@ bool usb_inserted(void)
 #ifdef HAVE_USBSTACK
 void usb_request_exclusive_ata(void)
 {
+    /* This is not really a clean place to start boosting the cpu. but it's
+     * currently the best one. We want to get rid of having to boost the cpu
+     * for usb anyway */
+    trigger_cpu_boost();
     if(!exclusive_ata_access) {
         queue_post(&usb_queue, USB_REQUEST_DISK, 0);
     }
@@ -536,6 +540,7 @@ void usb_request_exclusive_ata(void)
 void usb_release_exclusive_ata(void)
 {
+    cancel_cpu_boost();
     if(exclusive_ata_access) {
         queue_post(&usb_queue, USB_RELEASE_DISK, 0);
         exclusive_ata_access = false;


@@ -879,7 +879,6 @@ static void handle_scsi(struct command_block_wrapper* cbw)
             cur_sense_data.ascq=0;
         }
         else {
-            trigger_cpu_boost();
             cur_cmd.last_result = ata_read_sectors(IF_MV2(cur_cmd.lun,)
                                                    cur_cmd.sector,
                                                    MIN(BUFFER_SIZE/SECTOR_SIZE,
@@ -917,7 +916,6 @@ static void handle_scsi(struct command_block_wrapper* cbw)
             cur_sense_data.ascq=0;
         }
         else {
-            trigger_cpu_boost();
             receive_block_data(cur_cmd.data[0],
                                MIN(BUFFER_SIZE,
                                    cur_cmd.count*SECTOR_SIZE));
@@ -953,7 +951,6 @@ static void receive_block_data(void *data,int size)
 static void send_csw(int status)
 {
-    cancel_cpu_boost();
     tb.csw->signature = htole32(CSW_SIGNATURE);
     tb.csw->tag = cur_cmd.tag;
     tb.csw->data_residue = 0;