
Gigabeat S: Implement an SDMA API and use it in the PCM driver. Make some other miscellaneous adjustments to recording and the PCM buffer code to accommodate the use of physical addresses and cache coherency.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19949 a1c6a512-1295-4272-9138-f99709370657
Michael Sevakis 2009-02-08 22:32:41 +00:00
parent 0222d0a5eb
commit 94537f954e
21 changed files with 3002 additions and 141 deletions
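
The physical-address and cache-coherency handling the commit message refers to follows one pattern throughout the PCM driver below: align the buffer, clean (or invalidate) the data cache over it, translate the pointer with addr_virt_to_phys(), and fill in the SDMA buffer descriptor. The following is a condensed, hypothetical helper illustrating that pattern; it is not part of the actual driver, reuses the names that appear in the diff, and assumes the target headers are included.

/* Sketch only: mirrors what pcm_play_dma_start() in the diff does by hand. */
static void prepare_play_buffer(struct buffer_descriptor *bd,
                                void *addr, size_t size)
{
    /* Keep whole 16-bit L-R sample frames (4 bytes) */
    addr = (void *)(((unsigned long)addr + 3) & ~3);
    size &= ~3;

    /* Write back the dcache so the SDMA engine sees the sample data */
    clean_dcache_range(addr, size);

    /* SDMA works on physical addresses, not the CPU's cached mapping */
    bd->buf_addr = (void *)addr_virt_to_phys((unsigned long)addr);
    bd->mode.count = size;
    bd->mode.command = TRANSFER_16BIT;
    bd->mode.status = BD_DONE | BD_WRAP | BD_INTR;
}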


@ -24,6 +24,11 @@
#include "cpu.h"
#include "clkctl-imx31.h"
unsigned int imx31_get_src_pll(void)
{
return (CLKCTL_PMCR0 & 0xC0000000) == 0 ? PLL_SERIAL : PLL_MCU;
}
void imx31_clkctl_module_clock_gating(enum IMX31_CG_LIST cg,
enum IMX31_CG_MODES mode)
{
@ -72,8 +77,8 @@ unsigned int imx31_clkctl_get_pll(enum IMX31_PLLS pll)
unsigned int imx31_clkctl_get_ipg_clk(void)
{
unsigned int pll = imx31_clkctl_get_pll((CLKCTL_PMCR0 & 0xC0000000) == 0 ?
PLL_SERIAL : PLL_MCU);
unsigned int pllnum = imx31_get_src_pll();
unsigned int pll = imx31_clkctl_get_pll(pllnum);
uint32_t reg = CLKCTL_PDR0;
unsigned int max_pdf = ((reg >> 3) & 0x7) + 1;
unsigned int ipg_pdf = ((reg >> 6) & 0x3) + 1;
@ -81,6 +86,15 @@ unsigned int imx31_clkctl_get_ipg_clk(void)
return pll / (max_pdf * ipg_pdf);
}
unsigned int imx31_clkctl_get_ahb_clk(void)
{
unsigned int pllnum = imx31_get_src_pll();
unsigned int pll = imx31_clkctl_get_pll(pllnum);
unsigned int max_pdf = ((CLKCTL_PDR0 >> 3) & 0x7) + 1;
return pll / max_pdf;
}
unsigned int imx31_clkctl_get_ata_clk(void)
{
return imx31_clkctl_get_ipg_clk();
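
For reference, both dividers come straight out of CLKCTL_PDR0: max_pdf divides the source PLL down to ahb_clk, and ipg_pdf divides that again for ipg_clk. A standalone, purely illustrative example of the arithmetic (the PLL rate and register value are made up for the example):

#include <stdio.h>

int main(void)
{
    unsigned int pll  = 528000000;                 /* assumed source PLL rate in Hz */
    unsigned int pdr0 = (1u << 6) | (3u << 3);     /* ipg_pdf field = 1, max_pdf field = 3 */

    unsigned int max_pdf = ((pdr0 >> 3) & 0x7) + 1;   /* -> 4 */
    unsigned int ipg_pdf = ((pdr0 >> 6) & 0x3) + 1;   /* -> 2 */

    printf("ahb_clk = %u Hz\n", pll / max_pdf);             /* 132000000 */
    printf("ipg_clk = %u Hz\n", pll / (max_pdf * ipg_pdf)); /* 66000000 */
    return 0;
}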


@ -105,6 +105,9 @@ unsigned int imx31_clkctl_get_pll(enum IMX31_PLLS pll);
/* Return ipg_clk in HZ */
unsigned int imx31_clkctl_get_ipg_clk(void);
/* Return ahb_clk in HZ */
unsigned int imx31_clkctl_get_ahb_clk(void);
/* Return the ATA frequency in HZ */
unsigned int imx31_clkctl_get_ata_clk(void);


@ -1,8 +0,0 @@
#include <sys/types.h>
void dma_start(const void* addr, size_t size) {
(void) addr;
(void) size;
//TODO:
}


@ -24,6 +24,7 @@
#include "spi-imx31.h"
#include "mc13783.h"
#include "clkctl-imx31.h"
#include "sdma-imx31.h"
#include "kernel.h"
#include "thread.h"
@ -64,6 +65,9 @@ void tick_start(unsigned int interval_in_ms)
void kernel_device_init(void)
{
#ifndef BOOTLOADER
sdma_init();
#endif
spi_init();
mc13783_init();
}


@ -22,21 +22,12 @@
#include "mmu-imx31.h"
#include "mmu-arm.h"
void memory_init(void) {
#if 0
ttb_init();
set_page_tables();
enable_mmu();
#endif
unsigned long addr_virt_to_phys(unsigned long addr)
{
return addr | CSD0_BASE_ADDR;
}
void set_page_tables() {
#if 0
map_section(0, 0, 0x1000, CACHE_NONE); /* map every memory region to itself */
/*This pa *might* change*/
map_section(0x80000000, 0, 64, CACHE_ALL); /* map RAM to 0 and enable caching for it */
map_section((int)FRAME1, (int)FRAME1, 1, BUFFERED); /* enable buffered writing for the framebuffer */
map_section((int)FRAME2, (int)FRAME2, 1, BUFFERED);
#endif
unsigned long addr_phys_to_virt(unsigned long addr)
{
return addr & ~CSD0_BASE_ADDR;
}
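
These helpers work because the addressing scheme they assume puts SDRAM, physically located at CSD0_BASE_ADDR, at virtual address 0 (the removed map_section() call above documents the same intent), so converting between the two views is a single OR or AND with that base. A self-contained illustration, assuming CSD0_BASE_ADDR is 0x80000000 as on the i.MX31:

#include <stdio.h>

#define CSD0_BASE_ADDR 0x80000000ul   /* assumed value for this example */

static unsigned long virt_to_phys(unsigned long addr) { return addr |  CSD0_BASE_ADDR; }
static unsigned long phys_to_virt(unsigned long addr) { return addr & ~CSD0_BASE_ADDR; }

int main(void)
{
    unsigned long virt = 0x00123400ul;        /* made-up virtual buffer address */
    unsigned long phys = virt_to_phys(virt);  /* 0x80123400 - what the SDMA engine sees */

    printf("virt %#lx -> phys %#lx -> back to virt %#lx\n",
           virt, phys, phys_to_virt(phys));
    return 0;
}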


@ -23,5 +23,7 @@
void memory_init(void);
void set_page_tables(void);
unsigned long addr_virt_to_phys(unsigned long addr);
unsigned long addr_phys_to_virt(unsigned long addr);
#endif /* MMU_IMX31_H */


@ -23,78 +23,93 @@
#include "kernel.h"
#include "audio.h"
#include "sound.h"
#include "avic-imx31.h"
#include "clkctl-imx31.h"
#include "sdma-imx31.h"
#include "mmu-imx31.h"
/* This isn't DMA-based at the moment and is handled like Portal Player but
* will suffice for starters. */
#define DMA_PLAY_CH_NUM 2
#define DMA_REC_CH_NUM 1
static struct buffer_descriptor dma_play_bd DEVBSS_ATTR;
static struct channel_descriptor dma_play_cd DEVBSS_ATTR;
struct dma_data
{
uint16_t *p;
size_t size;
int locked;
int callback_pending; /* DMA interrupt happened while locked */
int state;
};
static struct dma_data dma_play_data =
{
/* Initialize to a locked, stopped state */
.p = NULL,
.size = 0,
.locked = 0,
.callback_pending = 0,
.state = 0
};
static void play_dma_callback(void)
{
unsigned char *start;
size_t size;
pcm_more_callback_type get_more = pcm_callback_for_more;
if (dma_play_data.locked)
{
/* Callback is locked out */
dma_play_data.callback_pending = 1;
return;
}
if (get_more == NULL || (get_more(&start, &size), size == 0))
{
/* Callback missing or no more DMA to do */
pcm_play_dma_stop();
pcm_play_dma_stopped_callback();
}
else
{
start = (void*)(((unsigned long)start + 3) & ~3);
size &= ~3;
/* Flush any pending cache writes */
clean_dcache_range(start, size);
dma_play_bd.buf_addr = (void *)addr_virt_to_phys((unsigned long)start);
dma_play_bd.mode.count = size;
dma_play_bd.mode.command = TRANSFER_16BIT;
dma_play_bd.mode.status = BD_DONE | BD_WRAP | BD_INTR;
sdma_channel_run(DMA_PLAY_CH_NUM);
}
}
void pcm_play_lock(void)
{
if (++dma_play_data.locked == 1)
{
/* Atomically disable transmit interrupt */
imx31_regclr32(&SSI_SIER1, SSI_SIER_TIE);
}
imx31_regclr32(&SSI_SIER1, SSI_SIER_TDMAE);
}
void pcm_play_unlock(void)
{
if (--dma_play_data.locked == 0 && dma_play_data.state != 0)
{
/* Atomically enable transmit interrupt */
imx31_regset32(&SSI_SIER1, SSI_SIER_TIE);
}
}
bool pending = false;
int oldstatus = disable_irq_save();
static void __attribute__((interrupt("IRQ"))) SSI1_HANDLER(void)
{
register pcm_more_callback_type get_more;
do
{
while (dma_play_data.size > 0)
if (dma_play_data.callback_pending)
{
if (SSI_SFCSR_TFCNT0r(SSI_SFCSR1) > 6)
{
return;
}
SSI_STX0_1 = *dma_play_data.p++;
SSI_STX0_1 = *dma_play_data.p++;
dma_play_data.size -= 4;
pending = true;
dma_play_data.callback_pending = 0;
}
/* p is empty, get some more data */
get_more = pcm_callback_for_more;
SSI_SIER1 |= SSI_SIER_TDMAE;
restore_irq(oldstatus);
if (get_more)
{
get_more((unsigned char **)&dma_play_data.p,
&dma_play_data.size);
}
/* Should an interrupt be forced instead? The upper pcm layer can
* call producer's callback in thread context so technically this is
* acceptable. */
if (pending)
play_dma_callback();
}
while (dma_play_data.size > 0);
/* No more data, so disable the FIFO/interrupt */
pcm_play_dma_stop();
pcm_play_dma_stopped_callback();
}
void pcm_dma_apply_settings(void)
@ -104,6 +119,17 @@ void pcm_dma_apply_settings(void)
void pcm_play_dma_init(void)
{
/* Init channel information */
dma_play_cd.bd_count = 1;
dma_play_cd.callback = play_dma_callback;
dma_play_cd.shp_addr = SDMA_PER_ADDR_SSI1_TX1;
dma_play_cd.wml = SDMA_SSI_TXFIFO_WML*2;
dma_play_cd.per_type = SDMA_PER_SSI;
dma_play_cd.tran_type = SDMA_TRAN_EMI_2_PER;
dma_play_cd.event_id1 = SDMA_REQ_SSI1_TX1;
sdma_channel_init(DMA_PLAY_CH_NUM, &dma_play_cd, &dma_play_bd);
imx31_clkctl_module_clock_gating(CG_SSI1, CGM_ON_ALL);
imx31_clkctl_module_clock_gating(CG_SSI2, CGM_ON_ALL);
@ -111,8 +137,8 @@ void pcm_play_dma_init(void)
SSI_SCR2 &= ~SSI_SCR_SSIEN;
SSI_SCR1 &= ~SSI_SCR_SSIEN;
SSI_SIER1 = SSI_SIER_TFE0; /* TX0 can issue an interrupt */
SSI_SIER2 = SSI_SIER_RFF0; /* RX0 can issue an interrupt */
SSI_SIER1 = 0;
SSI_SIER2 = 0;
/* Set up audio mux */
@ -155,8 +181,9 @@ void pcm_play_dma_init(void)
SSI_STCCR1 = SSI_STRCCR_WL16 | SSI_STRCCR_DCw(2-1) |
SSI_STRCCR_PMw(4-1);
/* Transmit low watermark - 2 samples in FIFO */
SSI_SFCSR1 = SSI_SFCSR_TFWM1w(1) | SSI_SFCSR_TFWM0w(2);
/* Transmit low watermark */
SSI_SFCSR1 = (SSI_SFCSR1 & ~SSI_SFCSR_TFWM0) |
SSI_SFCSR_TFWM0w(8-SDMA_SSI_TXFIFO_WML);
SSI_STMSK1 = 0;
/* SSI2 - provides MCLK to codec. Receives data from codec. */
@ -186,8 +213,9 @@ void pcm_play_dma_init(void)
SSI_SRCCR2 = SSI_STRCCR_WL16 | SSI_STRCCR_DCw(2-1) |
SSI_STRCCR_PMw(4-1);
/* Receive high watermark - 6 samples in FIFO */
SSI_SFCSR2 = SSI_SFCSR_RFWM1w(8) | SSI_SFCSR_RFWM0w(6);
/* Receive high watermark */
SSI_SFCSR2 = (SSI_SFCSR2 & ~SSI_SFCSR_RFWM0) |
SSI_SFCSR_RFWM0w(SDMA_SSI_RXFIFO_WML);
SSI_SRMSK2 = 0;
/* Enable SSI2 (codec clock) */
@ -199,7 +227,6 @@ void pcm_play_dma_init(void)
void pcm_postinit(void)
{
audiohw_postinit();
avic_enable_int(SSI1, IRQ, 8, SSI1_HANDLER);
}
static void play_start_pcm(void)
@ -207,32 +234,23 @@ static void play_start_pcm(void)
/* Stop transmission (if in progress) */
SSI_SCR1 &= ~SSI_SCR_TE;
/* Enable interrupt on unlock */
dma_play_data.state = 1;
/* Fill the FIFO or start when data is used up */
SSI_SCR1 |= SSI_SCR_SSIEN; /* Enable SSI */
SSI_STCR1 |= SSI_STCR_TFEN0; /* Enable TX FIFO */
while (1)
{
if (SSI_SFCSR_TFCNT0r(SSI_SFCSR1) > 6 || dma_play_data.size == 0)
{
SSI_SCR1 |= SSI_SCR_TE; /* Start transmitting */
return;
}
dma_play_data.state = 1; /* Enable DMA requests on unlock */
SSI_STX0_1 = *dma_play_data.p++;
SSI_STX0_1 = *dma_play_data.p++;
dma_play_data.size -= 4;
}
/* Do prefill to prevent swapped channels (see TLSbo61214 in MCIMX31CE).
* No actual solution was offered but this appears to work. */
SSI_STX0_1 = 0;
SSI_STX0_1 = 0;
SSI_STX0_1 = 0;
SSI_STX0_1 = 0;
SSI_SCR1 |= SSI_SCR_TE; /* Start transmitting */
}
static void play_stop_pcm(void)
{
/* Disable interrupt */
SSI_SIER1 &= ~SSI_SIER_TIE;
/* Wait for FIFO to empty */
while (SSI_SFCSR_TFCNT0r(SSI_SFCSR1) > 0);
@ -240,135 +258,227 @@ static void play_stop_pcm(void)
SSI_STCR1 &= ~SSI_STCR_TFEN0;
SSI_SCR1 &= ~(SSI_SCR_TE | SSI_SCR_SSIEN);
/* Do not enable interrupt on unlock */
/* Do not enable DMA requests on unlock */
dma_play_data.state = 0;
dma_play_data.callback_pending = 0;
}
void pcm_play_dma_start(const void *addr, size_t size)
{
dma_play_data.p = (void *)(((uintptr_t)addr + 3) & ~3);
dma_play_data.size = (size & ~3);
sdma_channel_stop(DMA_PLAY_CH_NUM);
/* Disable transmission */
SSI_STCR1 &= ~SSI_STCR_TFEN0;
SSI_SCR1 &= ~(SSI_SCR_TE | SSI_SCR_SSIEN);
addr = (void *)(((unsigned long)addr + 3) & ~3);
size &= ~3;
clean_dcache_range(addr, size);
dma_play_bd.buf_addr =
(void *)addr_virt_to_phys((unsigned long)(void *)addr);
dma_play_bd.mode.count = size;
dma_play_bd.mode.command = TRANSFER_16BIT;
dma_play_bd.mode.status = BD_DONE | BD_WRAP | BD_INTR;
play_start_pcm();
sdma_channel_start(DMA_PLAY_CH_NUM);
}
void pcm_play_dma_stop(void)
{
sdma_channel_stop(DMA_PLAY_CH_NUM);
play_stop_pcm();
dma_play_data.size = 0;
}
void pcm_play_dma_pause(bool pause)
{
if (pause)
{
sdma_channel_pause(DMA_PLAY_CH_NUM);
play_stop_pcm();
}
else
{
uint32_t addr = (uint32_t)dma_play_data.p;
dma_play_data.p = (void *)((addr + 2) & ~3);
dma_play_data.size &= ~3;
play_start_pcm();
sdma_channel_run(DMA_PLAY_CH_NUM);
}
}
/* Return the number of bytes waiting - full L-R sample pairs only */
size_t pcm_get_bytes_waiting(void)
{
return dma_play_data.size & ~3;
static unsigned long dsa DEVBSS_ATTR;
long offs, size;
int oldstatus;
/* read burst dma source address register in channel context */
sdma_read_words(&dsa, CHANNEL_CONTEXT_ADDR(DMA_PLAY_CH_NUM)+0x0b, 1);
oldstatus = disable_irq_save();
offs = dsa - (unsigned long)dma_play_bd.buf_addr;
size = dma_play_bd.mode.count;
restore_irq(oldstatus);
/* Be sure the addresses are coherent (no buffer change during read) */
if (offs >= 0 && offs < size)
{
return (size - offs) & ~3;
}
return 0;
}
/* Return a pointer to the samples and the number of them in *count */
const void * pcm_play_dma_get_peak_buffer(int *count)
{
uint32_t addr = (uint32_t)dma_play_data.p;
size_t cnt = dma_play_data.size;
*count = cnt >> 2;
return (void *)((addr + 2) & ~3);
static unsigned long dsa DEVBSS_ATTR;
unsigned long addr;
long offs, size;
int oldstatus;
/* read burst dma source address register in channel context */
sdma_read_words(&dsa, CHANNEL_CONTEXT_ADDR(DMA_PLAY_CH_NUM)+0x0b, 1);
oldstatus = disable_irq_save();
addr = dsa;
offs = addr - (unsigned long)dma_play_bd.buf_addr;
size = dma_play_bd.mode.count;
restore_irq(oldstatus);
/* Be sure the addresses are coherent (no buffer change during read) */
if (offs >= 0 && offs < size)
{
*count = (size - offs) >> 2;
return (void *)((addr + 2) & ~3);
}
*count = 0;
return NULL;
}
void * pcm_dma_addr(void *addr)
{
return (void *)addr_virt_to_phys((unsigned long)addr);
}
#ifdef HAVE_RECORDING
static struct buffer_descriptor dma_rec_bd DEVBSS_ATTR;
static struct channel_descriptor dma_rec_cd DEVBSS_ATTR;
static struct dma_data dma_rec_data =
{
/* Initialize to a locked, stopped state */
.p = NULL,
.size = 0,
.locked = 0,
.state = 0
};
static void __attribute__((interrupt("IRQ"))) SSI2_HANDLER(void)
static void rec_dma_callback(void)
{
register pcm_more_callback_type2 more_ready;
pcm_more_callback_type2 more_ready;
int status = 0;
while (dma_rec_data.size > 0)
if (dma_rec_data.locked)
{
if (SSI_SFCSR_RFCNT0r(SSI_SFCSR2) < 2)
return;
*dma_rec_data.p++ = SSI_SRX0_2;
*dma_rec_data.p++ = SSI_SRX0_2;
dma_rec_data.size -= 4;
dma_rec_data.callback_pending = 1;
return; /* Callback is locked out */
}
if (dma_rec_bd.mode.status & BD_RROR)
status = DMA_REC_ERROR_DMA;
more_ready = pcm_callback_more_ready;
if (more_ready == NULL || more_ready(0) < 0) {
/* Finished recording */
pcm_rec_dma_stop();
pcm_rec_dma_stopped_callback();
if (more_ready != NULL && more_ready(status) >= 0)
{
sdma_channel_run(DMA_REC_CH_NUM);
return;
}
/* Finished recording */
pcm_rec_dma_stop();
pcm_rec_dma_stopped_callback();
}
void pcm_rec_lock(void)
{
if (++dma_rec_data.locked == 1)
{
/* Atomically disable receive interrupt */
imx31_regclr32(&SSI_SIER2, SSI_SIER_RIE);
}
imx31_regclr32(&SSI_SIER2, SSI_SIER_RDMAE);
}
void pcm_rec_unlock(void)
{
if (--dma_rec_data.locked == 0 && dma_rec_data.state != 0)
{
/* Atomically enable receive interrupt */
imx31_regset32(&SSI_SIER2, SSI_SIER_RIE);
bool pending = false;
int oldstatus = disable_irq_save();
if (dma_rec_data.callback_pending)
{
pending = true;
dma_rec_data.callback_pending = 0;
}
SSI_SIER2 |= SSI_SIER_RDMAE;
restore_irq(oldstatus);
/* Should an interrupt be forced instead? The upper pcm layer can
* call consumer's callback in thread context so technically this is
* acceptable. */
if (pending)
rec_dma_callback();
}
}
void pcm_record_more(void *start, size_t size)
{
pcm_rec_peak_addr = start; /* Start peaking at dest */
dma_rec_data.p = start; /* Start of RX buffer */
dma_rec_data.size = size; /* Bytes to transfer */
start = (void *)(((unsigned long)start + 3) & ~3);
size &= ~3;
/* Write back and invalidate - buffer must be coherent */
invalidate_dcache_range(start, size);
start = (void *)addr_virt_to_phys((unsigned long)start);
pcm_rec_peak_addr = start;
dma_rec_bd.buf_addr = start;
dma_rec_bd.mode.count = size;
dma_rec_bd.mode.command = TRANSFER_16BIT;
dma_rec_bd.mode.status = BD_DONE | BD_WRAP | BD_INTR;
}
void pcm_rec_dma_stop(void)
{
/* Stop receiving data */
sdma_channel_stop(DMA_REC_CH_NUM);
imx31_regclr32(&SSI_SIER2, SSI_SIER_RDMAE);
SSI_SCR2 &= ~SSI_SCR_RE; /* Disable RX */
SSI_SRCR2 &= ~SSI_SRCR_RFEN0; /* Disable RX FIFO */
dma_rec_data.state = 0;
avic_disable_int(SSI2);
dma_rec_data.callback_pending = 0;
}
void pcm_rec_dma_start(void *addr, size_t size)
{
pcm_rec_dma_stop();
addr = (void *)(((unsigned long)addr + 3) & ~3);
size &= ~3;
invalidate_dcache_range(addr, size);
addr = (void *)addr_virt_to_phys((unsigned long)addr);
pcm_rec_peak_addr = addr;
dma_rec_data.p = addr;
dma_rec_data.size = size;
dma_rec_bd.buf_addr = addr;
dma_rec_bd.mode.count = size;
dma_rec_bd.mode.command = TRANSFER_16BIT;
dma_rec_bd.mode.status = BD_DONE | BD_WRAP | BD_INTR;
dma_rec_data.state = 1;
avic_enable_int(SSI2, IRQ, 9, SSI2_HANDLER);
SSI_SRCR2 |= SSI_SRCR_RFEN0; /* Enable RX FIFO */
/* Ensure clear FIFO */
@ -377,24 +487,58 @@ void pcm_rec_dma_start(void *addr, size_t size)
/* Enable receive */
SSI_SCR2 |= SSI_SCR_RE;
sdma_channel_start(DMA_REC_CH_NUM);
}
void pcm_rec_dma_close(void)
{
pcm_rec_dma_stop();
sdma_channel_close(DMA_REC_CH_NUM);
}
void pcm_rec_dma_init(void)
{
pcm_rec_dma_stop();
/* Init channel information */
dma_rec_cd.bd_count = 1;
dma_rec_cd.callback = rec_dma_callback;
dma_rec_cd.shp_addr = SDMA_PER_ADDR_SSI2_RX1;
dma_rec_cd.wml = SDMA_SSI_RXFIFO_WML*2;
dma_rec_cd.per_type = SDMA_PER_SSI;
dma_rec_cd.tran_type = SDMA_TRAN_PER_2_EMI;
dma_rec_cd.event_id1 = SDMA_REQ_SSI2_RX1;
sdma_channel_init(DMA_REC_CH_NUM, &dma_rec_cd, &dma_rec_bd);
}
const void * pcm_rec_dma_get_peak_buffer(int *count)
{
unsigned long addr = (uint32_t)pcm_rec_peak_addr;
unsigned long end = (uint32_t)dma_rec_data.p;
*count = (end >> 2) - (addr >> 2);
return (void *)(addr & ~3);
static unsigned long pda DEVBSS_ATTR;
unsigned long buf, addr, end, bufend;
int oldstatus;
/* read burst dma destination address register in channel context */
sdma_read_words(&pda, CHANNEL_CONTEXT_ADDR(DMA_REC_CH_NUM)+0x0a, 1);
oldstatus = disable_irq_save();
end = pda;
buf = (unsigned long)dma_rec_bd.buf_addr;
addr = (unsigned long)pcm_rec_peak_addr;
bufend = buf + dma_rec_bd.mode.count;
restore_irq(oldstatus);
/* Be sure the addresses are coherent (no buffer change during read) */
if (addr >= buf && addr < bufend &&
end >= buf && end < bufend)
{
*count = (end >> 2) - (addr >> 2);
return (void *)(addr & ~3);
}
*count = 0;
return NULL;
}
#endif /* HAVE_RECORDING */
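
The play and record lock/unlock paths above share one idea: while the PCM layer holds the lock, DMA requests are masked and a completion that arrives anyway only sets callback_pending; pcm_play_unlock()/pcm_rec_unlock() then run the missed callback in thread context. A stripped-down, hypothetical model of that handshake (names simplified, not the driver's actual API):

/* Hypothetical model of the locked/callback_pending handshake used by
 * dma_play_data and dma_rec_data above. */
struct lockable_dma
{
    volatile int locked;            /* lock nesting count                 */
    volatile int callback_pending;  /* completion arrived while locked    */
    void (*callback)(void);         /* normally invoked from the SDMA IRQ */
};

static void dma_complete(struct lockable_dma *d)
{
    if (d->locked)
    {
        d->callback_pending = 1;    /* defer: the PCM layer is busy */
        return;
    }
    d->callback();
}

static void dma_unlock(struct lockable_dma *d)
{
    if (--d->locked == 0)
    {
        /* The real driver does this with IRQs disabled so the flag and the
         * DMA-request enable bit are updated atomically. */
        int pending = d->callback_pending;
        d->callback_pending = 0;
        if (pending)
            d->callback();          /* run the missed callback in thread context */
    }
}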


@ -32,6 +32,28 @@
#include "clkctl-imx31.h"
#include "mc13783.h"
static unsigned long product_rev;
static unsigned long system_rev;
/** IC revision info routines **/
unsigned int iim_system_rev(void)
{
return system_rev & IIM_SREV_SREV;
}
unsigned int iim_prod_rev(void)
{
return product_rev;
}
static void iim_init(void)
{
/* Initialize the IC revision info (required by SDMA) */
imx31_clkctl_module_clock_gating(CG_IIM, CGM_ON_ALL);
product_rev = IIM_PREV;
system_rev = IIM_SREV;
}
/** Watchdog timer routines **/
/* Initialize the watchdog timer */
@ -155,6 +177,8 @@ void system_init(void)
/* MCR WFI enables wait mode */
CLKCTL_CCMR &= ~(3 << 14);
iim_init();
imx31_regset32(&SDHC1_CLOCK_CONTROL, STOP_CLK);
imx31_regset32(&SDHC2_CLOCK_CONTROL, STOP_CLK);
imx31_regset32(&RNGA_CONTROL, RNGA_CONTROL_SLEEP);
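
iim_init() caches the product and silicon revision from the IIM fuses because, as the comment notes, the SDMA setup needs the IC revision information. A hypothetical consumer of iim_system_rev() might look like the following; the revision constant and helper names are placeholders, not taken from this commit:

/* Hypothetical use of iim_system_rev(); the actual SDMA code decides for
 * itself how to interpret the IIM_SREV_SREV value. */
static void sdma_pick_variant(void)
{
    if (iim_system_rev() >= 0x20)   /* assumed encoding for rev 2.0 silicon */
        sdma_use_rev2_setup();      /* placeholder helper */
    else
        sdma_use_rev1_setup();      /* placeholder helper */
}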


@ -43,6 +43,8 @@ void watchdog_service(void);
void gpt_start(void);
void gpt_stop(void);
unsigned int iim_system_rev(void);
/* Prepare for transition to firmware */
void system_prepare_fw_start(void);
void tick_stop(void);