Coldfire targets: Fixed a performance-hitting bug for unaligned transfers. Unaligned transfers are now less than 10% slower than aligned transfers.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@11679 a1c6a512-1295-4272-9138-f99709370657
Jens Arnold 2006-12-06 18:44:45 +00:00
parent 4c43d9422c
commit b774bae5fd


@@ -63,8 +63,8 @@ copy_read_sectors:
         lsr.l    #8, %d3
         move.b   %d3, (%a0)+        /* write high byte of it, aligns dest addr */
-        btst.l   #1, %d0            /* longword aligned? */
-        beq.b    .r_end_u_w1        /* yes, skip leading word handling */
+        btst.l   #1, %d0            /* longword aligned? (testing old d0 value!) */
+        bne.b    .r_end_u_w1        /* yes, skip leading word handling */
         swap     %d2                /* move initial word up */
         move.w   (%a2), %d2         /* combine with second word */
@@ -273,8 +273,8 @@ copy_write_sectors:
         move.b   (%a0)+, %d2
-        btst.l   #1, %d0            /* longword aligned? */
-        beq.b    .w_end_u_w1        /* yes, skip leading word handling */
+        btst.l   #1, %d0            /* longword aligned? (testing old d0 value!) */
+        bne.b    .w_end_u_w1        /* yes, skip leading word handling */
         swap     %d2
         move.w   (%a0)+, %d2
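
Both hunks flip the branch taken after the alignment test: bit 1 is checked on the saved pre-increment address in %d0, so the branch that skips the leading word step must be bne rather than beq. The following is a hypothetical C sketch of the general technique these routines use, aligning the destination in two small steps before the wide-copy loop; the function name and structure are illustrative only and are not the actual Rockbox copy_read_sectors/copy_write_sectors code, which also reassembles the unaligned side by shifting register pairs.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: align the destination before the wide-copy loop. */
static void copy_with_aligned_dest(uint8_t *dst, const uint8_t *src, size_t len)
{
    /* Leading byte: if bit 0 of the destination is set, one byte copy
     * makes it 16-bit aligned. */
    if (((uintptr_t)dst & 1) && len > 0) {
        *dst++ = *src++;
        len--;
    }

    /* Leading word: if bit 1 is now set, one 16-bit copy makes the
     * destination 32-bit aligned.  This is the decision the
     * btst.l #1 / branch pair makes in the assembly above. */
    if (((uintptr_t)dst & 2) && len >= 2) {
        memcpy(dst, src, 2);            /* small constant-size copy, portable */
        dst += 2;
        src += 2;
        len -= 2;
    }

    /* Main loop: every store now lands on a long-word boundary. */
    while (len >= 4) {
        memcpy(dst, src, 4);
        dst += 4;
        src += 4;
        len -= 4;
    }

    /* Trailing bytes. */
    while (len > 0) {
        *dst++ = *src++;
        len--;
    }
}

Getting the leading-word decision wrong does not corrupt data in this sketch, but in the assembly it sends unaligned buffers down the slow path, which is the performance hit the commit message describes.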