
Slightly more optimised memset() for SH1. Especially faster for 4 < length < 12.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@6594 a1c6a512-1295-4272-9138-f99709370657
Jens Arnold 2005-06-07 17:27:47 +00:00
parent e83c6f3b24
commit 91c46c818a


@@ -39,7 +39,7 @@
  * register usage:
  * r0 - temporary
  * r1 - bit mask for rounding to long bounds
- * r2 - last / first long bound (only if >= 12 bytes)
+ * r2 - start address +11 for main loop
  * r4 - start address
  * r5 - data (spread to all 4 bytes if >= 12 bytes)
  * r6 - current address (runs down from end to start)
@@ -50,58 +50,57 @@
  */
 _memset:
+    neg     r4,r0
+    and     #3,r0       /* r0 = (4 - align_offset) % 4 */
+    add     #4,r0
+    cmp/hs  r0,r6       /* at least one aligned longword to fill? */
     add     r4,r6       /* r6 = end_address */
-    mov     r6,r0
-    add     #-12,r0     /* r0 = r6 - 12; don't go below 12 here! */
-    cmp/hs  r4,r0       /* >= 12 bytes to fill? */
-    bf      .start_b2   /* no, jump directly to byte loop */
+    bf      .no_longs   /* no, jump directly to byte loop */
 
     extu.b  r5,r5       /* start: spread data to all 4 bytes */
     swap.b  r5,r0
     or      r0,r5       /* data now in 2 lower bytes of r5 */
     swap.w  r5,r0
     or      r0,r5       /* data now in all 4 bytes of r5 */
 
     mov     #-4,r1      /* r1 = 0xFFFFFFFC */
-    mov     r6,r2
-    bra     .start_b1
-    and     r1,r2       /* r2 = last long bound */
+    mov     r6,r0
+    and     r1,r0       /* r0 = last long bound */
+    cmp/hi  r0,r6       /* any leading byte? */
+    bf      .end_b1     /* no: skip loop */
 
     /* leading byte loop: sets 0..3 bytes */
 .loop_b1:
     mov.b   r5,@-r6     /* store byte */
-.start_b1:
-    cmp/hi  r2,r6       /* runs r6 down to last long bound */
-    bt      .loop_b1
+    cmp/hi  r0,r6
+    bt      .loop_b1    /* runs r6 down to last long bound */
 
-    mov     r4,r2
-    add     #11,r2      /* combined for rounding and offset */
-    and     r1,r2       /* r2 = first long bound + 8 */
+.end_b1:
+    mov     r4,r2       /* r2 = start_address... */
+    add     #11,r2      /* ... + 11, combined for rounding and offset */
+    xor     r2,r0
+    tst     #4,r0       /* bit 2 tells whether an even or odd number of */
+    bf      .loop_odd   /* longwords to set */
 
     /* main loop: set 2 longs per pass */
-.loop2_l:
+.loop_2l:
     mov.l   r5,@-r6     /* store first long */
-    cmp/hi  r2,r6       /* runs r6 down to first or second long bound */
+.loop_odd:
+    cmp/hi  r2,r6       /* runs r6 down to first long bound */
     mov.l   r5,@-r6     /* store second long */
-    bt      .loop2_l
+    bt      .loop_2l
 
-    add     #-8,r2      /* correct offset */
-    cmp/hi  r2,r6       /* 1 long left? */
-    bf      .start_b2   /* no, jump to trailing byte loop */
-
-    bra     .start_b2   /* jump to trailing byte loop */
-    mov.l   r5,@-r6     /* store last long */
+.no_longs:
+    cmp/hi  r4,r6       /* any bytes left? */
+    bf      .end_b2     /* no: skip loop */
 
     /* trailing byte loop */
+    .align  2
 .loop_b2:
     mov.b   r5,@-r6     /* store byte */
-.start_b2:
     cmp/hi  r4,r6       /* runs r6 down to the start address */
     bt      .loop_b2
 
+.end_b2:
     rts
     mov     r4,r0       /* return start address */
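
For reference, here is a rough C model of the fill strategy the new routine implements (the name memset_model is made up for illustration; the shipped code is the SH1 assembly above). The idea: spread the fill byte to all four bytes of a longword, then fill from the end of the buffer downwards: 0..3 leading bytes down to the last longword boundary, aligned longword stores down to the first longword boundary, and the remaining bytes down to the start address. The old code only took the longword path for lengths of at least 12 bytes; the new code takes it whenever at least one aligned longword fits, which is what makes 4 < length < 12 faster.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative C model only -- the real routine is the assembly above,
     * which additionally unrolls the longword loop (two stores per compare)
     * and uses the xor/tst #4 parity trick to enter that loop mid-way. */
    void *memset_model(void *dst, int data, size_t len)
    {
        uint8_t *start = dst;
        uint8_t *p = start + len;                      /* like r6: runs down from the end */
        size_t pad = (size_t)(-(uintptr_t)start) & 3;  /* (4 - align_offset) % 4 */

        if (len >= pad + 4) {                     /* at least one aligned longword to fill? */
            uint32_t word = (uint8_t)data;
            word |= word << 8;                    /* spread data to all 4 bytes */
            word |= word << 16;

            while ((uintptr_t)p & 3)              /* leading bytes, down to last long bound */
                *--p = (uint8_t)data;

            while (p >= start + 4) {              /* longword stores, down to first long bound */
                p -= 4;
                *(uint32_t *)p = word;            /* p is 4-byte aligned here */
            }
        }
        while (p > start)                         /* trailing bytes, down to the start address */
            *--p = (uint8_t)data;

        return start;                             /* return start address (like ANSI memset) */
    }

The assembly squeezes out a little more by storing two longwords per compare and entering the main loop at .loop_odd when the longword count is odd, replacing the old post-loop "1 long left?" fixup.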