1
0
Fork 0
forked from len0rd/rockbox

libwmapro: slightly shorter and faster inline asm fixed point multiplication routines, speedup is ~0.5%. Also don't lie to gcc about which vars are changed by the asm.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@27584 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Nils Wallménius 2010-07-26 23:00:22 +00:00
parent 5dd8c53b96
commit 30d286d859

View file

@ -95,37 +95,34 @@
/*
 * fixmul16: fixed-point multiply, result = (X*Y) >> 16.
 *
 * ColdFire EMAC implementation (post-r27584 version). The EMAC unit's
 * mac.l result in %acc0 is the product shifted left by one, so the high
 * half only needs >>1 before being combined with the low half via
 * move.w/swap (hi goes to the upper word, lo's top word to the lower).
 * mulu.l is issued between mac.l and movclr.l to avoid an EMAC stall.
 *
 * X, Y: int32_t operands (Y may be any addressable expression; X is
 * copied into a scratch register since the asm clobbers it).
 * Evaluates to int32_t. NOTE(review): assumes %acc0 is free for use and
 * the EMAC is in the default (fractional-shift) mode — confirm against
 * the surrounding codec init code.
 */
#define fixmul16(X,Y) \
({ \
    int32_t t, x = (X); \
    asm volatile ( \
        "mac.l    %[x],%[y],%%acc0\n\t" /* multiply */ \
        "mulu.l   %[y],%[x]       \n\t" /* get lower half, avoid emac stall */ \
        "movclr.l %%acc0,%[t]     \n\t" /* get higher half */ \
        "lsr.l    #1,%[t]         \n\t" /* hi >>= 1 to compensate emac shift */ \
        "move.w   %[t],%[x]       \n\t" /* combine halfwords */ \
        "swap     %[x]            \n\t" \
        : [t] "=&d" (t), [x] "+d" (x) \
        : [y] "d" ((Y))); \
    x; \
})
/*
 * fixmul24: fixed-point multiply, result = (X*Y) >> 24.
 *
 * ColdFire EMAC implementation (post-r27584 version). Because the EMAC
 * product in %acc0 arrives pre-shifted left by one, the high half only
 * needs <<7 (instead of <<8) before being OR-combined with the unsigned
 * low half shifted right by 24. The low-half shift is scheduled before
 * movclr.l so the mulu.l result is consumed while the EMAC drains,
 * avoiding a stall; %[t] is reused first as the shift count (24 does not
 * fit moveq-free shift immediates) and then as the high-half register.
 *
 * X, Y: int32_t operands; evaluates to int32_t.
 * NOTE(review): assumes %acc0 is free for use — confirm against callers.
 */
#define fixmul24(X,Y) \
({ \
    int32_t t, x = (X); \
    asm volatile ( \
        "mac.l    %[x],%[y],%%acc0\n\t" /* multiply */ \
        "mulu.l   %[y],%[x]       \n\t" /* get lower half, avoid emac stall */ \
        "moveq.l  #24,%[t]        \n\t" \
        "lsr.l    %[t],%[x]       \n\t" /* (unsigned)lo >>= 24 */ \
        "movclr.l %%acc0,%[t]     \n\t" /* get higher half */ \
        "asl.l    #7,%[t]         \n\t" /* hi <<= 7, plus one free */ \
        "or.l     %[x],%[t]       \n\t" /* combine result */ \
        : [t] "=&d" (t), [x] "+d" (x) \
        : [y] "d" ((Y))); \
    t; \
})
/* Calculates: result = (X*Y)>>32 */ /* Calculates: result = (X*Y)>>32 */
/*
 * vector_fixmul_scalar: dst[i] = (src[i] * mul) >> 24 for i in [0, len).
 *
 * Thin Q24 fixed-point scale loop over an int32_t vector; dst and src
 * may alias element-for-element (each element is read before written).
 * Signature reconstructed from the diff hunk header — unchanged by the
 * commit; shown here as clean code because the scraped side-by-side
 * diff duplicated every line.
 */
static inline void vector_fixmul_scalar(int32_t *dst, const int32_t *src,
                                        int32_t mul, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = fixmul24(src[i], mul);
}
static inline int av_clip(int a, int amin, int amax) static inline int av_clip(int a, int amin, int amax)