
Restructured DSP code for readability and speed. Simplified the API.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@7174 a1c6a512-1295-4272-9138-f99709370657
Magnus Holmgren 2005-07-16 12:25:28 +00:00
parent a0266a8bfe
commit 08761aaa52
3 changed files with 501 additions and 420 deletions

dsp.c

@@ -17,158 +17,31 @@
 *
 ****************************************************************************/

#include <string.h>
#include "dsp.h"
#include "kernel.h"
#include "playback.h"
#include "system.h"

/* The "dither" code to convert the 24-bit samples produced by libmad was
 * taken from the coolplayer project - coolplayer.sourceforge.net
 */

/* 16-bit samples are scaled based on these constants. The shift should be
 * no more than 15.
 */
#define WORD_SHIFT 12
#define WORD_FRACBITS 27

#define NATIVE_DEPTH 16
#define SAMPLE_BUF_SIZE 256
#define RESAMPLE_BUF_SIZE (256 * 4) /* Enough for 11,025 Hz -> 44,100 Hz */

#if CONFIG_CPU == MCF5249 && !defined(SIMULATOR)

#define INIT() asm volatile ("move.l #0xb0, %macsr") /* frac, round, clip */

/* Multiply 2 32-bit integers and return the 32 most significant bits of the
 * result.
 */
#define FRACMUL(x, y) \
({ \
    long t; \
@@ -181,220 +54,474 @@ static struct resampler resample[2];
#else

#define INIT()

#define FRACMUL(x, y) (long) (((((long long) (x)) * ((long long) (y))) >> 32))

#endif
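The generic FRACMUL fallback above keeps the 32 most significant bits of the 64-bit product, so its operands behave as 32-bit fixed-point fractions. A standalone illustration (not part of the commit; fixed-width types are used so it behaves the same on a 64-bit host, whereas dsp.c relies on long being 32 bits on the targets):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the generic FRACMUL fallback, with explicit widths. */
static int32_t fracmul(int32_t x, int32_t y)
{
    return (int32_t) (((int64_t) x * (int64_t) y) >> 32);
}

int main(void)
{
    /* Top 32 bits of 0x40000000 * 0x40000000 (= 2^60): prints 10000000. */
    printf("%08lx\n", (unsigned long) (uint32_t) fracmul(0x40000000, 0x40000000));
    return 0;
}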
struct dsp_config
{
    long frequency;
    long clip_min;
    long clip_max;
    int sample_depth;
    int sample_bytes;
    int stereo_mode;
    int frac_bits;
    bool dither_enabled;
};

struct resample_data
{
    long last_sample;
    long phase;
    long delta;
};

struct dither_data
{
    long error[3];
    long random;
};

static struct dsp_config dsp;
static struct dither_data dither_data[2] IDATA_ATTR;
static struct resample_data resample_data[2] IDATA_ATTR;

/* The internal format is 32-bit samples, non-interleaved, stereo. This
 * format is similar to the raw output from several codecs, so the amount
 * of copying needed is minimized for that case.
 */
static long sample_buf[SAMPLE_BUF_SIZE] IDATA_ATTR;
static long resample_buf[RESAMPLE_BUF_SIZE] IDATA_ATTR;

/* Convert at most count samples to the internal format, if needed. Returns
 * number of samples ready for further processing. Updates src to point
 * past the samples "consumed" and dst is set to point to the samples to
 * consume. Note that for mono, dst[0] equals dst[1], as there is no point
 * in processing the same data twice.
 */
static int convert_to_internal(char* src[], int count, long* dst[])
{
    count = MIN(SAMPLE_BUF_SIZE / 2, count);

    if ((dsp.sample_depth <= NATIVE_DEPTH)
        || (dsp.stereo_mode == STEREO_INTERLEAVED))
    {
        dst[0] = &sample_buf[0];
        dst[1] = (dsp.stereo_mode == STEREO_MONO)
            ? dst[0] : &sample_buf[SAMPLE_BUF_SIZE / 2];
    }
    else
    {
        dst[0] = (long*) src[0];
        dst[1] = (long*) ((dsp.stereo_mode == STEREO_MONO) ? src[0] : src[1]);
    }

    if (dsp.sample_depth <= NATIVE_DEPTH)
    {
        short* s0 = (short*) src[0];
        long* d0 = dst[0];
        long* d1 = dst[1];
        int scale = WORD_SHIFT;
        int i;

        if (dsp.stereo_mode == STEREO_INTERLEAVED)
        {
            for (i = 0; i < count; i++)
            {
                *d0++ = *s0++ << scale;
                *d1++ = *s0++ << scale;
            }
        }
        else if (dsp.stereo_mode == STEREO_NONINTERLEAVED)
        {
            short* s1 = (short*) src[1];

            for (i = 0; i < count; i++)
            {
                *d0++ = *s0++ << scale;
                *d1++ = *s1++ << scale;
            }
        }
        else
        {
            for (i = 0; i < count; i++)
            {
                *d0++ = *s0++ << scale;
            }
        }
    }
    else if (dsp.stereo_mode == STEREO_INTERLEAVED)
    {
        long* s0 = (long*) src[0];
        long* d0 = dst[0];
        long* d1 = dst[1];
        int i;

        for (i = 0; i < count; i++)
        {
            *d0++ = *s0++;
            *d1++ = *s0++;
        }
    }

    if (dsp.stereo_mode == STEREO_NONINTERLEAVED)
    {
        src[0] += count * dsp.sample_bytes;
        src[1] += count * dsp.sample_bytes;
    }
    else if (dsp.stereo_mode == STEREO_INTERLEAVED)
    {
        src[0] += count * dsp.sample_bytes * 2;
    }
    else
    {
        src[0] += count * dsp.sample_bytes;
    }

    return count;
}
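A quick check on the constants used by the 16-bit conversion above (an illustration, not from the commit): shifting full-scale 16-bit input left by WORD_SHIFT stays just inside the (1 << WORD_FRACBITS) - 1 clip level that DSP_SET_SAMPLE_DEPTH installs further down.

#include <stdio.h>

#define WORD_SHIFT 12
#define WORD_FRACBITS 27

int main(void)
{
    long sample = 0x7fffL << WORD_SHIFT;        /* 0x07fff000 */
    long clip_max = (1L << WORD_FRACBITS) - 1;  /* 0x07ffffff */

    printf("%08lx <= %08lx\n", sample, clip_max);
    return 0;
}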
/* Linear resampling that introduces a one sample delay, because of our
 * inability to look into the future at the end of a frame.
 */
static long downsample(long *dst, long *src, int count,
                       struct resample_data *r)
{
    long phase = r->phase;
    long delta = r->delta;
    long last_sample = r->last_sample;
    int pos = phase >> 16;
    int i = 1;

    INIT();

    /* Do we need last sample of previous frame for interpolation? */
    if (pos > 0)
    {
        last_sample = src[pos - 1];
    }

    *dst++ = last_sample + FRACMUL((phase & 0xffff) << 15,
        src[pos] - last_sample);
    phase += delta;

    while ((pos = phase >> 16) < count)
    {
        *dst++ = src[pos - 1] + FRACMUL((phase & 0xffff) << 15,
            src[pos] - src[pos - 1]);
        phase += delta;
        i++;
    }

    /* Wrap phase accumulator back to start of next frame. */
    r->phase = phase - (count << 16);
    r->delta = delta;
    r->last_sample = src[count - 1];

    return i;
}
static long upsample(long *dst, long *src, int count, struct resample_data *r)
{
    long phase = r->phase;
    long delta = r->delta;
    long last_sample = r->last_sample;
    int i = 0;
    int pos;

    INIT();

    while ((pos = phase >> 16) == 0)
    {
        *dst++ = last_sample + FRACMUL((phase & 0xffff) << 15,
            src[pos] - last_sample);
        phase += delta;
        i++;
    }

    while ((pos = phase >> 16) < count)
    {
        *dst++ = src[pos - 1] + FRACMUL((phase & 0xffff) << 15,
            src[pos] - src[pos - 1]);
        phase += delta;
        i++;
    }

    /* Wrap phase accumulator back to start of next frame. */
    r->phase = phase - (count << 16);
    r->delta = delta;
    r->last_sample = src[count - 1];

    return i;
}
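Both resamplers step a 16.16 fixed-point phase accumulator whose integer part indexes the source buffer and whose fractional part weights the interpolation; delta is set by DSP_SET_FREQUENCY below as frequency * 65536 / NATIVE_FREQUENCY. A standalone sketch of that bookkeeping for a 22,050 Hz source (illustration only, not part of the commit):

#include <stdio.h>

int main(void)
{
    const unsigned long native_frequency = 44100;
    unsigned long frequency = 22050;    /* codec output rate */
    unsigned long delta = frequency * 65536 / native_frequency;    /* 0x8000 */
    unsigned long phase = 0;
    int out;

    /* Eight output samples drawn from a 4-sample input frame. */
    for (out = 0; out < 8; out++)
    {
        printf("out %d <- src[%lu], frac 0x%04lx\n",
               out, phase >> 16, phase & 0xffff);
        phase += delta;
    }

    return 0;
}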
/* Resample count stereo samples. Updates the src array, if resampling is
 * done, to refer to the resampled data. Returns number of stereo samples
 * for further processing.
 */
static inline int resample(long* src[], int count)
{
    long new_count;

    if (dsp.frequency != NATIVE_FREQUENCY)
    {
        long* d0 = &resample_buf[0];
        /* Only process the second channel if needed. */
        long* d1 = (src[0] == src[1]) ? d0
            : &resample_buf[RESAMPLE_BUF_SIZE / 2];

        if (dsp.frequency < NATIVE_FREQUENCY)
        {
            new_count = upsample(d0, src[0], count, &resample_data[0]);

            if (d0 != d1)
            {
                upsample(d1, src[1], count, &resample_data[1]);
            }
        }
        else
        {
            new_count = downsample(d0, src[0], count, &resample_data[0]);

            if (d0 != d1)
            {
                downsample(d1, src[1], count, &resample_data[1]);
            }
        }

        src[0] = d0;
        src[1] = d1;
    }
    else
    {
        new_count = count;
    }

    return new_count;
}
static inline long clip_sample(long sample)
{
    if (sample > dsp.clip_max)
    {
        sample = dsp.clip_max;
    }
    else if (sample < dsp.clip_min)
    {
        sample = dsp.clip_min;
    }

    return sample;
}

/* The "dither" code to convert the 24-bit samples produced by libmad was
 * taken from the coolplayer project - coolplayer.sourceforge.net
 */
static long dither_sample(long sample, long bias, long mask,
                          struct dither_data* dither)
{
    long output;
    long random;

    /* Noise shape and bias */
    sample += dither->error[0] - dither->error[1] + dither->error[2];
    dither->error[2] = dither->error[1];
    dither->error[1] = dither->error[0] / 2;

    output = sample + bias;

    /* Dither */
    random = dither->random * 0x0019660dL + 0x3c6ef35fL;
    sample += (random & mask) - (dither->random & mask);
    dither->random = random;

    /* Clip and quantize */
    sample = clip_sample(sample);
    output = clip_sample(output) & ~mask;

    /* Error feedback */
    dither->error[0] = sample - output;

    return output;
}
static void write_samples(short* dst, long* src[], int count)
{
    long* s0 = src[0];
    long* s1 = src[1];
    int scale = dsp.frac_bits + 1 - NATIVE_DEPTH;

    if (dsp.dither_enabled)
    {
        long bias = (1L << (dsp.frac_bits - NATIVE_DEPTH));
        long mask = (1L << scale) - 1;

        while (count-- > 0)
        {
            *dst++ = (short) (dither_sample(*s0++, bias, mask, &dither_data[0])
                >> scale);
            *dst++ = (short) (dither_sample(*s1++, bias, mask, &dither_data[1])
                >> scale);
        }
    }
    else
    {
        while (count-- > 0)
        {
            *dst++ = (short) (clip_sample(*s0++) >> scale);
            *dst++ = (short) (clip_sample(*s1++) >> scale);
        }
    }
}
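For the default 16-bit case (frac_bits = WORD_FRACBITS = 27, as DSP_SET_SAMPLE_DEPTH arranges below), the scaling constants used by write_samples() work out as follows; the values are an illustration, not part of the commit:

#include <stdio.h>

#define NATIVE_DEPTH 16
#define WORD_FRACBITS 27

int main(void)
{
    int frac_bits = WORD_FRACBITS;
    int scale = frac_bits + 1 - NATIVE_DEPTH;       /* 12 */
    long bias = 1L << (frac_bits - NATIVE_DEPTH);   /* 0x800 */
    long mask = (1L << scale) - 1;                  /* 0xfff: bits dithered away */

    printf("scale %d, bias 0x%lx, mask 0x%lx\n", scale, bias, mask);
    return 0;
}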
/* Process and convert src audio to dst based on the DSP configuration,
 * reading size bytes of audio data. dst is assumed to be large enough; use
 * dsp_output_size() to get the required size. src is an array of
 * pointers; for mono and interleaved stereo, it contains one pointer to the
 * start of the audio data; for non-interleaved stereo, it contains two
 * pointers, one for each audio channel. Returns number of bytes written to
 * dst.
 */
long dsp_process(char* dst, char* src[], long size)
{
    long* tmp[2];
    long written = 0;
    long factor = (dsp.stereo_mode != STEREO_MONO) ? 2 : 1;
    int samples;

    size /= dsp.sample_bytes * factor;

    while (size > 0)
    {
        samples = convert_to_internal(src, size, tmp);
        size -= samples;
        samples = resample(tmp, samples);
        write_samples((short*) dst, tmp, samples);
        written += samples;
        dst += samples * sizeof(short) * 2;
        yield();
    }

    return written * sizeof(short) * 2;
}
/* Given size bytes of input data, calculate the maximum number of bytes of
 * output data that would be generated (the calculation is not entirely
 * exact and rounds upwards to be on the safe side; during resampling,
 * the number of samples generated depends on the current state of the
 * resampler).
 */
long dsp_output_size(long size)
{
    if (dsp.stereo_mode == STEREO_MONO)
    {
        size *= 2;
    }

    if (dsp.sample_depth > NATIVE_DEPTH)
    {
        size /= 2;
    }

    if (dsp.frequency != NATIVE_FREQUENCY)
    {
        size = (long) ((((unsigned long) size * NATIVE_FREQUENCY)
            + (dsp.frequency - 1)) / dsp.frequency);
    }

    return (size + 3) & ~3;
}
/* Given size bytes of output buffer, calculate number of bytes of input
 * data that would be consumed in order to fill the output buffer.
 */
long dsp_input_size(long size)
{
    if (dsp.stereo_mode == STEREO_MONO)
    {
        size /= 2;
    }

    if (dsp.sample_depth > NATIVE_DEPTH)
    {
        size *= 2;
    }

    if (dsp.frequency != NATIVE_FREQUENCY)
    {
        size = (long) ((((unsigned long) size * dsp.frequency)
            + (NATIVE_FREQUENCY - 1)) / NATIVE_FREQUENCY);
    }

    return size;
}
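A numeric check of the two size helpers (illustrative figures only, mirroring the formulas above for 16-bit stereo input at 22,050 Hz, where only the frequency term applies):

#include <stdio.h>

int main(void)
{
    unsigned long in = 4096, native = 44100, freq = 22050;

    /* dsp_output_size(): scale up to the native rate, round to 4 bytes. */
    unsigned long out = (((in * native + (freq - 1)) / freq) + 3) & ~3UL;  /* 8192 */
    /* dsp_input_size(): the inverse mapping. */
    unsigned long back = (out * freq + (native - 1)) / native;            /* 4096 */

    printf("output %lu, input %lu\n", out, back);
    return 0;
}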
int dsp_stereo_mode(void)
{
    return dsp.stereo_mode;
}
bool dsp_configure(int setting, void *value)
{
    switch (setting)
    {
    case DSP_SET_FREQUENCY:
        dsp.frequency = ((int) value == 0) ? NATIVE_FREQUENCY : (int) value;
        memset(resample_data, 0, sizeof(resample_data));
        resample_data[0].delta = resample_data[1].delta =
            (unsigned long) dsp.frequency * 65536 / NATIVE_FREQUENCY;
        break;

    case DSP_SET_CLIP_MIN:
        dsp.clip_min = (long) value;
        break;

    case DSP_SET_CLIP_MAX:
        dsp.clip_max = (long) value;
        break;

    case DSP_SET_SAMPLE_DEPTH:
        dsp.sample_depth = (long) value;

        if (dsp.sample_depth <= NATIVE_DEPTH)
        {
            dsp.frac_bits = WORD_FRACBITS;
            dsp.sample_bytes = sizeof(short);
            dsp.clip_max = ((1 << WORD_FRACBITS) - 1);
            dsp.clip_min = -((1 << WORD_FRACBITS));
        }
        else
        {
            dsp.frac_bits = (long) value;
            dsp.sample_bytes = sizeof(long);
        }

        break;

    case DSP_SET_STEREO_MODE:
        dsp.stereo_mode = (long) value;
        break;

    case DSP_RESET:
        dsp.dither_enabled = false;
        dsp.stereo_mode = STEREO_NONINTERLEAVED;
        dsp.clip_max = ((1 << WORD_FRACBITS) - 1);
        dsp.clip_min = -((1 << WORD_FRACBITS));
        dsp.frequency = NATIVE_FREQUENCY;
        dsp.sample_depth = NATIVE_DEPTH;
        dsp.frac_bits = WORD_FRACBITS;
        break;

    case DSP_DITHER:
        memset(dither_data, 0, sizeof(dither_data));
        dsp.dither_enabled = (bool) value;
        break;

    default:

@@ -403,5 +530,3 @@ bool dsp_configure(int setting, void *value)
    return 1;
}

dsp.h

@@ -21,7 +21,6 @@
#define _DSP_H

#include <stdlib.h>
#include <stdbool.h>

#define NATIVE_FREQUENCY 44100

@@ -29,19 +28,10 @@
#define STEREO_NONINTERLEAVED 1
#define STEREO_MONO 2

long dsp_process(char *dest, char *src[], long size);
long dsp_input_size(long size);
long dsp_output_size(long size);
int dsp_stereo_mode(void);
bool dsp_configure(int setting, void *value);

#endif

playback.c

@@ -178,96 +178,62 @@ static bool v1first = false;
static void mp3_set_elapsed(struct mp3entry* id3);
int mp3_get_file_pos(void);
bool codec_pcmbuf_insert_split_callback(void *ch1, void *ch2,
                                        long length)
{
    char* src[2];
    char *dest;
    long input_size;
    long output_size;

    src[0] = ch1;
    src[1] = ch2;

    if (dsp_stereo_mode() == STEREO_NONINTERLEAVED)
    {
        length *= 2;    /* Length is per channel */
    }

    while (length > 0) {
        while ((dest = pcmbuf_request_buffer(dsp_output_size(length),
            &output_size)) == NULL) {
            yield();
        }

        input_size = dsp_input_size(output_size);

        /* Guard against rounding errors (output_size can be too large). */
        input_size = MIN(input_size, length);

        if (input_size <= 0) {
            pcmbuf_flush_buffer(0);
            continue;
        }

        output_size = dsp_process(dest, src, input_size);
        pcmbuf_flush_buffer(output_size);
        length -= input_size;
    }

    return true;
}

bool codec_pcmbuf_insert_callback(char *buf, long length)
{
    /* TODO: The audiobuffer API should probably be updated, and be based on
     * pcmbuf_insert_split().
     */
    long real_length = length;

    if (dsp_stereo_mode() == STEREO_NONINTERLEAVED)
    {
        length /= 2;    /* Length is per channel */
    }

    /* Second channel is only used for non-interleaved stereo. */
    return codec_pcmbuf_insert_split_callback(buf, buf + (real_length / 2),
        length);
}

void* get_codec_memory_callback(long *size)
{
    *size = MALLOC_BUFSIZE;