
Several tweaks and cleanups:

* Use .rept instead of repeated macros for repeating blocks.
* Use MUL (variant) instead of MLA (variant) in the first step of the ARM scalarproduct() if there's no loop.
* Unroll ARM assembler functions to 32 where not already done, plus the generic scalarproduct().

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19144 a1c6a512-1295-4272-9138-f99709370657
Jens Arnold 2008-11-19 21:31:33 +00:00
parent 14d37cb455
commit 2a5053f58c
5 changed files with 171 additions and 148 deletions
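For reference, here is a minimal hedged sketch (not the committed Rockbox code; the function name dot16_sketch is made up) of the two ARM-side ideas from the commit message, modeled on the ORDER <= 16 path of the 32-bit scalarproduct() in the diff below: ".rept"/".endr" makes the assembler repeat the unrolled block instead of pasting a C macro several times into the asm string, and starting with MUL instead of MLA means the accumulator needs no zero initialisation, so res can be a plain "=r" output.

/* Hedged sketch only -- hypothetical 16-tap int32 dot product illustrating
 * the .rept unrolling and the MUL-first trick from this commit. */
static inline int dot16_sketch(int *v1, int *v2)
{
    int res;
    asm volatile (
        "ldmia %[v1]!, {r0-r3}        \n"
        "ldmia %[v2]!, {r4-r7}        \n"
        "mul   %[res], r4, r0         \n" /* first step: MUL, so no zero-init of res */
        "mla   %[res], r5, r1, %[res] \n"
        "mla   %[res], r6, r2, %[res] \n"
        "mla   %[res], r7, r3, %[res] \n"
        ".rept 3                      \n" /* assembler repeats the 4-tap block 3 more times */
        "ldmia %[v1]!, {r0-r3}        \n"
        "ldmia %[v2]!, {r4-r7}        \n"
        "mla   %[res], r4, r0, %[res] \n"
        "mla   %[res], r5, r1, %[res] \n"
        "mla   %[res], r6, r2, %[res] \n"
        "mla   %[res], r7, r3, %[res] \n"
        ".endr                        \n"
        : [res]"=r"(res), [v1]"+r"(v1), [v2]"+r"(v2)
        : /* no pure inputs */
        : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "memory"
    );
    return res;
}

When there is a runtime loop (the ORDER > 32 case in the committed code), the first block keeps MLA and the accumulator is instead zeroed once with "mov %[res], #0" before the loop.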

View file

@@ -117,21 +117,35 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
  * incorrect results (if ARM aligncheck is disabled). */
 static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 {
-    int res = 0;
+    int res;
+#if ORDER > 32
+    int cnt = ORDER>>5;
+#endif
 #if ORDER > 16
-    int cnt = ORDER>>4;
+#define MLA_BLOCKS "3"
+#else
+#define MLA_BLOCKS "1"
 #endif
     asm volatile (
+#if ORDER > 32
+        "mov %[res], #0 \n"
+#endif
         "tst %[v2], #2 \n"
         "beq 20f \n"
     "10: \n"
         "ldrh r7, [%[v2]], #2 \n"
+#if ORDER > 32
         "mov r7, r7, lsl #16 \n"
     "1: \n"
         "ldmia %[v1]!, {r0-r3} \n"
         "smlabt %[res], r0, r7, %[res] \n"
+#else
+        "ldmia %[v1]!, {r0-r3} \n"
+        "smulbb %[res], r0, r7 \n"
+#endif
         "ldmia %[v2]!, {r4-r7} \n"
         "smlatb %[res], r0, r4, %[res] \n"
         "smlabt %[res], r1, r4, %[res] \n"
@@ -140,6 +154,8 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "smlatb %[res], r2, r6, %[res] \n"
         "smlabt %[res], r3, r6, %[res] \n"
         "smlatb %[res], r3, r7, %[res] \n"
+        ".rept " MLA_BLOCKS "\n"
         "ldmia %[v1]!, {r0-r3} \n"
         "smlabt %[res], r0, r7, %[res] \n"
         "ldmia %[v2]!, {r4-r7} \n"
@@ -150,7 +166,8 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "smlatb %[res], r2, r6, %[res] \n"
         "smlabt %[res], r3, r6, %[res] \n"
         "smlatb %[res], r3, r7, %[res] \n"
-#if ORDER > 16
+        ".endr \n"
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
 #endif
@@ -160,7 +177,11 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
     "1: \n"
         "ldmia %[v1]!, {r0-r3} \n"
         "ldmia %[v2]!, {r4-r7} \n"
+#if ORDER > 32
         "smlabb %[res], r0, r4, %[res] \n"
+#else
+        "smulbb %[res], r0, r4 \n"
+#endif
         "smlatt %[res], r0, r4, %[res] \n"
         "smlabb %[res], r1, r5, %[res] \n"
         "smlatt %[res], r1, r5, %[res] \n"
@@ -168,6 +189,8 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "smlatt %[res], r2, r6, %[res] \n"
         "smlabb %[res], r3, r7, %[res] \n"
         "smlatt %[res], r3, r7, %[res] \n"
+        ".rept " MLA_BLOCKS "\n"
         "ldmia %[v1]!, {r0-r3} \n"
         "ldmia %[v2]!, {r4-r7} \n"
         "smlabb %[res], r0, r4, %[res] \n"
@@ -178,19 +201,20 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "smlatt %[res], r2, r6, %[res] \n"
         "smlabb %[res], r3, r7, %[res] \n"
         "smlatt %[res], r3, r7, %[res] \n"
-#if ORDER > 16
+        ".endr \n"
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
 #endif
     "99: \n"
         : /* outputs */
-#if ORDER > 16
+#if ORDER > 32
         [cnt]"+r"(cnt),
 #endif
         [v1] "+r"(v1),
         [v2] "+r"(v2),
-        [res]"+r"(res)
+        [res]"=r"(res)
         : /* inputs */
         : /* clobbers */
         "r0", "r1", "r2", "r3",

View file

@@ -29,8 +29,14 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110, USA
  * incorrect results (if ARM aligncheck is disabled). */
 static inline void vector_add(int16_t* v1, int16_t* v2)
 {
+#if ORDER > 32
+    int cnt = ORDER>>5;
+#endif
 #if ORDER > 16
-    int cnt = ORDER>>4;
+#define ADD_SUB_BLOCKS "4"
+#else
+#define ADD_SUB_BLOCKS "2"
 #endif
     asm volatile (
@@ -42,6 +48,7 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
         "ldr r5, [%[v2]], #4 \n"
         "mov r4, r4, lsl #16 \n"
     "1: \n"
+        ".rept " ADD_SUB_BLOCKS "\n"
         "ldmia %[v2]!, {r6-r7} \n"
         "ldmia %[v1], {r0-r3} \n"
         "mov r5, r5, ror #16 \n"
@@ -56,21 +63,8 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
         "pkhbt r7, r7, r4, lsl #16 \n"
         "sadd16 r3, r3, r7 \n"
         "stmia %[v1]!, {r0-r3} \n"
-        "ldmia %[v2]!, {r6-r7} \n"
-        "ldmia %[v1], {r0-r3} \n"
-        "mov r5, r5, ror #16 \n"
-        "pkhtb r4, r5, r4, asr #16 \n"
-        "sadd16 r0, r0, r4 \n"
-        "pkhbt r5, r5, r6, lsl #16 \n"
-        "sadd16 r1, r1, r5 \n"
-        "ldmia %[v2]!, {r4-r5} \n"
-        "mov r7, r7, ror #16 \n"
-        "pkhtb r6, r7, r6, asr #16 \n"
-        "sadd16 r2, r2, r6 \n"
-        "pkhbt r7, r7, r4, lsl #16 \n"
-        "sadd16 r3, r3, r7 \n"
-        "stmia %[v1]!, {r0-r3} \n"
-#if ORDER > 16
+        ".endr \n"
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
 #endif
@@ -78,6 +72,7 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
     "20: \n"
     "1: \n"
+        ".rept " ADD_SUB_BLOCKS "\n"
         "ldmia %[v2]!, {r4-r7} \n"
         "ldmia %[v1], {r0-r3} \n"
         "sadd16 r0, r0, r4 \n"
@@ -85,21 +80,15 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
         "sadd16 r2, r2, r6 \n"
         "sadd16 r3, r3, r7 \n"
         "stmia %[v1]!, {r0-r3} \n"
-        "ldmia %[v2]!, {r4-r7} \n"
-        "ldmia %[v1], {r0-r3} \n"
-        "sadd16 r0, r0, r4 \n"
-        "sadd16 r1, r1, r5 \n"
-        "sadd16 r2, r2, r6 \n"
-        "sadd16 r3, r3, r7 \n"
-        "stmia %[v1]!, {r0-r3} \n"
-#if ORDER > 16
+        ".endr \n"
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
 #endif
     "99: \n"
         : /* outputs */
-#if ORDER > 16
+#if ORDER > 32
         [cnt]"+r"(cnt),
 #endif
         [v1] "+r"(v1),
@@ -116,8 +105,8 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
  * incorrect results (if ARM aligncheck is disabled). */
 static inline void vector_sub(int16_t* v1, int16_t* v2)
 {
-#if ORDER > 16
-    int cnt = ORDER>>4;
+#if ORDER > 32
+    int cnt = ORDER>>5;
 #endif
     asm volatile (
@@ -129,6 +118,7 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
         "ldr r5, [%[v2]], #4 \n"
         "mov r4, r4, lsl #16 \n"
     "1: \n"
+        ".rept " ADD_SUB_BLOCKS "\n"
         "ldmia %[v2]!, {r6-r7} \n"
         "ldmia %[v1], {r0-r3} \n"
         "mov r5, r5, ror #16 \n"
@@ -143,21 +133,8 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
         "pkhbt r7, r7, r4, lsl #16 \n"
         "ssub16 r3, r3, r7 \n"
         "stmia %[v1]!, {r0-r3} \n"
-        "ldmia %[v2]!, {r6-r7} \n"
-        "ldmia %[v1], {r0-r3} \n"
-        "mov r5, r5, ror #16 \n"
-        "pkhtb r4, r5, r4, asr #16 \n"
-        "ssub16 r0, r0, r4 \n"
-        "pkhbt r5, r5, r6, lsl #16 \n"
-        "ssub16 r1, r1, r5 \n"
-        "ldmia %[v2]!, {r4-r5} \n"
-        "mov r7, r7, ror #16 \n"
-        "pkhtb r6, r7, r6, asr #16 \n"
-        "ssub16 r2, r2, r6 \n"
-        "pkhbt r7, r7, r4, lsl #16 \n"
-        "ssub16 r3, r3, r7 \n"
-        "stmia %[v1]!, {r0-r3} \n"
-#if ORDER > 16
+        ".endr \n"
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
 #endif
@@ -165,6 +142,7 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
     "20: \n"
     "1: \n"
+        ".rept " ADD_SUB_BLOCKS "\n"
         "ldmia %[v2]!, {r4-r7} \n"
         "ldmia %[v1], {r0-r3} \n"
         "ssub16 r0, r0, r4 \n"
@@ -172,21 +150,15 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
         "ssub16 r2, r2, r6 \n"
         "ssub16 r3, r3, r7 \n"
         "stmia %[v1]!, {r0-r3} \n"
-        "ldmia %[v2]!, {r4-r7} \n"
-        "ldmia %[v1], {r0-r3} \n"
-        "ssub16 r0, r0, r4 \n"
-        "ssub16 r1, r1, r5 \n"
-        "ssub16 r2, r2, r6 \n"
-        "ssub16 r3, r3, r7 \n"
-        "stmia %[v1]!, {r0-r3} \n"
-#if ORDER > 16
+        ".endr \n"
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
 #endif
     "99: \n"
         : /* outputs */
-#if ORDER > 16
+#if ORDER > 32
         [cnt]"+r"(cnt),
 #endif
         [v1] "+r"(v1),
@@ -203,12 +175,21 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
  * incorrect results (if ARM aligncheck is disabled). */
 static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 {
-    int res = 0;
+    int res;
+#if ORDER > 32
+    int cnt = ORDER>>5;
+#endif
 #if ORDER > 16
-    int cnt = ORDER>>4;
+#define MLA_BLOCKS "3"
+#else
+#define MLA_BLOCKS "1"
 #endif
     asm volatile (
+#if ORDER > 32
+        "mov %[res], #0 \n"
+#endif
         "tst %[v2], #2 \n"
         "beq 20f \n"
@@ -216,11 +197,18 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "ldrh r7, [%[v2]], #2 \n"
         "ldmia %[v2]!, {r4-r5} \n"
         "ldmia %[v1]!, {r0-r1} \n"
+#if ORDER > 32
         "mov r7, r7, lsl #16 \n"
     "1: \n"
         "pkhbt r8, r4, r7 \n"
         "ldmia %[v2]!, {r6-r7} \n"
         "smladx %[res], r0, r8, %[res] \n"
+#else
+        "pkhbt r8, r4, r7, lsl #16 \n"
+        "ldmia %[v2]!, {r6-r7} \n"
+        "smuadx %[res], r0, r8 \n"
+#endif
+        ".rept " MLA_BLOCKS "\n"
         "pkhbt r8, r5, r4 \n"
         "ldmia %[v1]!, {r2-r3} \n"
         "smladx %[res], r1, r8, %[res] \n"
@@ -233,11 +221,13 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "pkhbt r8, r4, r7 \n"
         "ldmia %[v2]!, {r6-r7} \n"
         "smladx %[res], r0, r8, %[res] \n"
+        ".endr \n"
         "pkhbt r8, r5, r4 \n"
         "ldmia %[v1]!, {r2-r3} \n"
         "smladx %[res], r1, r8, %[res] \n"
         "pkhbt r8, r6, r5 \n"
-#if ORDER > 16
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "ldmneia %[v2]!, {r4-r5} \n"
         "smladx %[res], r2, r8, %[res] \n"
@@ -257,7 +247,12 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "ldmia %[v2]!, {r5-r7} \n"
     "1: \n"
         "ldmia %[v1]!, {r2-r3} \n"
+#if ORDER > 32
         "smlad %[res], r0, r5, %[res] \n"
+#else
+        "smuad %[res], r0, r5 \n"
+#endif
+        ".rept " MLA_BLOCKS "\n"
         "ldmia %[v2]!, {r4-r5} \n"
         "smlad %[res], r1, r6, %[res] \n"
         "ldmia %[v1]!, {r0-r1} \n"
@@ -266,9 +261,11 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "smlad %[res], r3, r4, %[res] \n"
         "ldmia %[v1]!, {r2-r3} \n"
         "smlad %[res], r0, r5, %[res] \n"
+        ".endr \n"
         "ldmia %[v2]!, {r4-r5} \n"
         "smlad %[res], r1, r6, %[res] \n"
-#if ORDER > 16
+#if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "ldmneia %[v1]!, {r0-r1} \n"
         "smlad %[res], r2, r7, %[res] \n"
@@ -282,12 +279,12 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
     "99: \n"
         : /* outputs */
-#if ORDER > 16
+#if ORDER > 32
         [cnt]"+r"(cnt),
 #endif
         [v1] "+r"(v1),
         [v2] "+r"(v2),
-        [res]"+r"(res)
+        [res]"=r"(res)
         : /* inputs */
         : /* clobbers */
         "r0", "r1", "r2", "r3", "r4",

View file

@@ -67,7 +67,7 @@ static inline void vector_add(int16_t* v1, int16_t* v2)
         "move.l %%d3, (%[v1])+ \n"
         "lea.l (16, %[v2]), %[v2] \n"
         "move.l %%d4, %%d0 \n"
         "movem.l (%[v1]), %%a0-%%a3 \n"
         "movem.l (%[v2]), %%d1-%%d4 \n"
         ADDHALFXREGS(%%a0, %%d1, %%d0)
@@ -175,7 +175,7 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
         "move.l %%d3, (%[v1])+ \n"
         "lea.l (16, %[v2]), %[v2] \n"
         "move.l %%d4, %%d0 \n"
         "movem.l (%[v2]), %%d1-%%d4 \n"
         "movem.l (%[v1]), %%a0-%%a3 \n"
         SUBHALFXREGS(%%a0, %%d1, %%d0)
@@ -207,7 +207,6 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
         "move.l %%d2, (%[v1])+ \n"
         SUBHALFREGS(%%a3, %%d4, %%d3)
         "move.l %%d3, (%[v1])+ \n"
         "lea.l (16, %[v2]), %[v2] \n"
         "movem.l (%[v2]), %%d1-%%d4 \n"
@@ -248,22 +247,16 @@ static inline void vector_sub(int16_t* v1, int16_t* v2)
  * in signed integer mode - call above macro before use. */
 static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 {
-    int res = 0;
+    int res;
 #if ORDER > 32
     int cnt = ORDER>>5;
 #endif
-#define MACBLOCK4 \
-    "mac.w %%d0u, %%d1u, (%[v1])+, %%d2, %%acc0\n" \
-    "mac.w %%d0l, %%d1l, (%[v2])+, %%d1, %%acc0\n" \
-    "mac.w %%d2u, %%d1u, (%[v1])+, %%d0, %%acc0\n" \
-    "mac.w %%d2l, %%d1l, (%[v2])+, %%d1, %%acc0\n"
-#define MACBLOCK4_U2 \
-    "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n" \
-    "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n" \
-    "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n" \
-    "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n"
+#if ORDER > 16
+#define MAC_BLOCKS "7"
+#else
+#define MAC_BLOCKS "3"
+#endif
     asm volatile (
         "move.l %[v2], %%d0 \n"
@@ -274,15 +267,13 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "move.l (%[v1])+, %%d0 \n"
         "move.w (%[v2])+, %%d1 \n"
     "1: \n"
-#if ORDER > 16
-        MACBLOCK4_U2
-        MACBLOCK4_U2
-        MACBLOCK4_U2
-        MACBLOCK4_U2
-#endif
-        MACBLOCK4_U2
-        MACBLOCK4_U2
-        MACBLOCK4_U2
+        ".rept " MAC_BLOCKS "\n"
+        "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n"
+        "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n"
+        "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n"
+        "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n"
+        ".endr \n"
         "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n"
         "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n"
         "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n"
@@ -299,15 +290,13 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
         "move.l (%[v1])+, %%d0 \n"
         "move.l (%[v2])+, %%d1 \n"
     "1: \n"
-#if ORDER > 16
-        MACBLOCK4
-        MACBLOCK4
-        MACBLOCK4
-        MACBLOCK4
-#endif
-        MACBLOCK4
-        MACBLOCK4
-        MACBLOCK4
+        ".rept " MAC_BLOCKS "\n"
+        "mac.w %%d0u, %%d1u, (%[v1])+, %%d2, %%acc0\n"
+        "mac.w %%d0l, %%d1l, (%[v2])+, %%d1, %%acc0\n"
+        "mac.w %%d2u, %%d1u, (%[v1])+, %%d0, %%acc0\n"
+        "mac.w %%d2l, %%d1l, (%[v2])+, %%d1, %%acc0\n"
+        ".endr \n"
         "mac.w %%d0u, %%d1u, (%[v1])+, %%d2, %%acc0\n"
         "mac.w %%d0l, %%d1l, (%[v2])+, %%d1, %%acc0\n"
 #if ORDER > 32

View file

@@ -30,27 +30,23 @@ static inline void vector_add(int32_t* v1, int32_t* v2)
     int cnt = ORDER>>5;
 #endif
-#define ADDBLOCK4 \
-    "ldmia %[v1], {r0-r3} \n" \
-    "ldmia %[v2]!, {r4-r7} \n" \
-    "add r0, r0, r4 \n" \
-    "add r1, r1, r5 \n" \
-    "add r2, r2, r6 \n" \
-    "add r3, r3, r7 \n" \
-    "stmia %[v1]!, {r0-r3} \n"
+#if ORDER > 16
+#define ADD_SUB_BLOCKS "8"
+#else
+#define ADD_SUB_BLOCKS "4"
+#endif
     asm volatile (
     "1: \n"
-        ADDBLOCK4
-        ADDBLOCK4
-        ADDBLOCK4
-        ADDBLOCK4
-#if ORDER > 16
-        ADDBLOCK4
-        ADDBLOCK4
-        ADDBLOCK4
-        ADDBLOCK4
-#endif
+        ".rept " ADD_SUB_BLOCKS "\n"
+        "ldmia %[v1], {r0-r3} \n"
+        "ldmia %[v2]!, {r4-r7} \n"
+        "add r0, r0, r4 \n"
+        "add r1, r1, r5 \n"
+        "add r2, r2, r6 \n"
+        "add r3, r3, r7 \n"
+        "stmia %[v1]!, {r0-r3} \n"
+        ".endr \n"
 #if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
@@ -74,27 +70,17 @@ static inline void vector_sub(int32_t* v1, int32_t* v2)
     int cnt = ORDER>>5;
 #endif
-#define SUBBLOCK4 \
-    "ldmia %[v1], {r0-r3} \n" \
-    "ldmia %[v2]!, {r4-r7} \n" \
-    "sub r0, r0, r4 \n" \
-    "sub r1, r1, r5 \n" \
-    "sub r2, r2, r6 \n" \
-    "sub r3, r3, r7 \n" \
-    "stmia %[v1]!, {r0-r3} \n"
     asm volatile (
     "1: \n"
-        SUBBLOCK4
-        SUBBLOCK4
-        SUBBLOCK4
-        SUBBLOCK4
-#if ORDER > 16
-        SUBBLOCK4
-        SUBBLOCK4
-        SUBBLOCK4
-        SUBBLOCK4
-#endif
+        ".rept " ADD_SUB_BLOCKS "\n"
+        "ldmia %[v1], {r0-r3} \n"
+        "ldmia %[v2]!, {r4-r7} \n"
+        "sub r0, r0, r4 \n"
+        "sub r1, r1, r5 \n"
+        "sub r2, r2, r6 \n"
+        "sub r3, r3, r7 \n"
+        "stmia %[v1]!, {r0-r3} \n"
+        ".endr \n"
 #if ORDER > 32
         "subs %[cnt], %[cnt], #1 \n"
         "bne 1b \n"
@@ -114,17 +100,24 @@ static inline void vector_sub(int32_t* v1, int32_t* v2)
 static inline int32_t scalarproduct(int32_t* v1, int32_t* v2)
 {
-    int res = 0;
+    int res;
 #if ORDER > 32
     int cnt = ORDER>>5;
 #endif
     asm volatile (
 #if ORDER > 16
+#if ORDER > 32
+        "mov %[res], #0 \n"
+#endif
         "ldmia %[v2]!, {r6-r7} \n"
     "1: \n"
         "ldmia %[v1]!, {r0,r1,r3-r5} \n"
+#if ORDER > 32
         "mla %[res], r6, r0, %[res] \n"
+#else
+        "mul %[res], r6, r0 \n"
+#endif
         "mla %[res], r7, r1, %[res] \n"
         "ldmia %[v2]!, {r0-r2,r6-r8} \n"
         "mla %[res], r0, r3, %[res] \n"
@@ -177,19 +170,21 @@ static inline int32_t scalarproduct(int32_t* v1, int32_t* v2)
 #endif
 #else /* ORDER <= 16 */
-#define MLABLOCK4 \
-    "ldmia %[v1]!, {r0-r3} \n" \
-    "ldmia %[v2]!, {r4-r7} \n" \
-    "mla %[res], r4, r0, %[res] \n" \
-    "mla %[res], r5, r1, %[res] \n" \
-    "mla %[res], r6, r2, %[res] \n" \
+        "ldmia %[v1]!, {r0-r3} \n"
+        "ldmia %[v2]!, {r4-r7} \n"
+        "mul %[res], r4, r0 \n"
+        "mla %[res], r5, r1, %[res] \n"
+        "mla %[res], r6, r2, %[res] \n"
         "mla %[res], r7, r3, %[res] \n"
-        MLABLOCK4
-        MLABLOCK4
-        MLABLOCK4
-        MLABLOCK4
+        ".rept 3 \n"
+        "ldmia %[v1]!, {r0-r3} \n"
+        "ldmia %[v2]!, {r4-r7} \n"
+        "mla %[res], r4, r0, %[res] \n"
+        "mla %[res], r5, r1, %[res] \n"
+        "mla %[res], r6, r2, %[res] \n"
+        "mla %[res], r7, r3, %[res] \n"
+        ".endr \n"
 #endif /* ORDER <= 16 */
         : /* outputs */
 #if ORDER > 32
@@ -197,7 +192,7 @@ static inline int32_t scalarproduct(int32_t* v1, int32_t* v2)
 #endif
         [v1] "+r"(v1),
         [v2] "+r"(v2),
-        [res]"+r"(res)
+        [res]"=r"(res)
         : /* inputs */
         : /* clobbers */
         "r0", "r1", "r2", "r3",

View file

@@ -116,8 +116,8 @@ static inline int32_t scalarproduct(filter_int* v1, filter_int* v2)
 {
     int res = 0;
-#if ORDER > 16
-    int order = (ORDER >> 4);
+#if ORDER > 32
+    int order = (ORDER >> 5);
     while (order--)
 #endif
     {
@@ -137,6 +137,24 @@ static inline int32_t scalarproduct(filter_int* v1, filter_int* v2)
         res += *v1++ * *v2++;
         res += *v1++ * *v2++;
         res += *v1++ * *v2++;
+#if ORDER > 16
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+        res += *v1++ * *v2++;
+#endif
     }
     return res;
 }