1
0
Fork 0
forked from len0rd/rockbox

i.MX31/Gigabeat S: Actually enable DPTC which can set optimal voltage for 528MHz. Requires an SPI and PMIC interface rework because of the low-latency needs for the DPTC to work best with minimal panicking. SPI can work with multitasking and asynchronously from interrupt handlers or normal code.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@25800 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sevakis 2010-05-04 10:07:53 +00:00
parent 7480afb3c5
commit 931e06de64
11 changed files with 579 additions and 415 deletions

View file

@ -104,8 +104,8 @@ int rtc_read_datetime(struct tm *tm)
* greater */
do
{
if (mc13783_read_regset(rtc_registers, regs,
RTC_NUM_REGS) < RTC_NUM_REGS)
if (mc13783_read_regs(rtc_registers, regs,
RTC_NUM_REGS) < RTC_NUM_REGS)
{
/* Couldn't read registers */
return 0;
@ -204,7 +204,7 @@ int rtc_write_datetime(const struct tm *tm)
regs[RTC_REG_DAY] = day + tm->tm_mday - 1 - base_yearday;
if (mc13783_write_regset(rtc_registers, regs, 2) == 2)
if (mc13783_write_regs(rtc_registers, regs, 2) == 2)
{
return 7;
}

View file

@ -147,7 +147,7 @@
#define GPIO_EVENT_MASK (USE_GPIO1_EVENTS)
/* Define this if target has an additional number of threads specific to it */
#define TARGET_EXTRA_THREADS 3
#define TARGET_EXTRA_THREADS 2
/* Type of mobile power - check this out */
#define BATTERY_CAPACITY_DEFAULT 700 /* default battery capacity */

View file

@ -21,6 +21,8 @@
#ifndef _MC13783_H_
#define _MC13783_H_
#include "spi-imx31.h"
enum mc13783_regs_enum
{
MC13783_INTERRUPT_STATUS0 = 0,
@ -1261,11 +1263,21 @@ void mc13783_init(void);
void mc13783_close(void);
uint32_t mc13783_set(unsigned address, uint32_t bits);
uint32_t mc13783_clear(unsigned address, uint32_t bits);
uint32_t mc13783_read(unsigned address);
int mc13783_write(unsigned address, uint32_t data);
uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask);
int mc13783_write_regset(const unsigned char *regs, const uint32_t *data, int count);
uint32_t mc13783_read(unsigned address);
int mc13783_read_regset(const unsigned char *regs, uint32_t *buffer, int count);
/* buffer must be available as packet workspace */
int mc13783_read_regs(const unsigned char *regs, uint32_t *buffer, int count);
/* buffer must be available as packet workspace */
int mc13783_write_regs(const unsigned char *regs, uint32_t *buffer, int count);
/* buffer must be available as packet workspace */
bool mc13783_read_async(struct spi_transfer_desc *xfer,
const unsigned char *regs, uint32_t *buffer,
int count, spi_transfer_cb_fn_type callback);
/* buffer must be available as packet workspace */
bool mc13783_write_async(struct spi_transfer_desc *xfer,
const unsigned char *regs, uint32_t *buffer,
int count, spi_transfer_cb_fn_type callback);
#define MC13783_DATA_ERROR UINT32_MAX

View file

@ -213,7 +213,7 @@ bool __dbg_ports(void)
lcd_puts(0, line++, "PMIC Registers");
line++;
mc13783_read_regset(pmic_regset, pmic_regs, ARRAYLEN(pmic_regs));
mc13783_read_regs(pmic_regset, pmic_regs, ARRAYLEN(pmic_regs));
for (i = 0; i < (int)ARRAYLEN(pmic_regs); i++)
{

View file

@ -53,7 +53,7 @@ static void update_dptc_counts(unsigned int level, unsigned int wp)
}
static inline uint32_t check_regulator_setting(uint32_t setting)
static uint32_t check_regulator_setting(uint32_t setting)
{
/* Simply a safety check *in case* table gets scrambled */
if (setting < VOLTAGE_SETTING_MIN)
@ -374,10 +374,18 @@ static void dvfs_stop(void)
/** DPTC **/
/* Request tracking since boot */
static bool dptc_running = false; /* Has driver enabled DPTC? */
unsigned int dptc_nr_dn = 0;
unsigned int dptc_nr_up = 0;
unsigned int dptc_nr_pnc = 0;
static struct spi_transfer_desc dptc_pmic_xfer; /* Transfer descriptor */
static const unsigned char dptc_pmic_regs[2] = /* Register subaddresses */
{ MC13783_SWITCHERS0, MC13783_SWITCHERS1 };
static uint32_t dptc_reg_shadows[2]; /* shadow regs */
static uint32_t dptc_regs_buf[2]; /* buffer for async write */
/* Enable DPTC and unmask interrupt. */
static void enable_dptc(void)
@ -397,123 +405,91 @@ static void enable_dptc(void)
}
static void dptc_new_wp(unsigned int wp)
/* Called after final PMIC read is completed */
static void dptc_transfer_done_callback(struct spi_transfer_desc *xfer)
{
unsigned int level = dvfs_level;
const union dvfs_dptc_voltage_table_entry *entry = &dvfs_dptc_voltage_table[wp];
if (xfer->count != 0)
return;
uint32_t sw1a = check_regulator_setting(entry->sw1a);
uint32_t sw1advs = check_regulator_setting(entry->sw1advs);
uint32_t sw1bdvs = check_regulator_setting(entry->sw1bdvs);
uint32_t sw1bstby = check_regulator_setting(entry->sw1bstby);
update_dptc_counts(dvfs_level, dptc_wp);
dptc_wp = wp;
mc13783_write_masked(MC13783_SWITCHERS0,
sw1a << MC13783_SW1A_POS | /* SW1A */
sw1advs << MC13783_SW1ADVS_POS, /* SW1ADVS */
MC13783_SW1A | MC13783_SW1ADVS);
mc13783_write_masked(MC13783_SWITCHERS1,
sw1bdvs << MC13783_SW1BDVS_POS | /* SW1BDVS */
sw1bstby << MC13783_SW1BSTBY_POS, /* SW1BSTBY */
MC13783_SW1BDVS | MC13783_SW1BSTBY);
udelay(100); /* Wait to settle */
update_dptc_counts(level, wp);
if (dptc_running)
enable_dptc();
}
/* DPTC service thread */
#ifdef ROCKBOX_HAS_LOGF
#define DPTC_STACK_SIZE DEFAULT_STACK_SIZE
#else
#define DPTC_STACK_SIZE 160
#endif
static int dptc_thread_stack[DPTC_STACK_SIZE/sizeof(int)];
static const char * const dptc_thread_name = "dptc";
static struct wakeup dptc_wakeup; /* Object to signal upon DPTC event */
static struct mutex dptc_mutex; /* Avoid mutually disrupting voltage updates */
static unsigned long dptc_int_data; /* Data passed to thread for each event */
static bool dptc_running = false; /* Has driver enabled DPTC? */
static void dptc_interrupt_thread(void)
/* Handle the DPTC interrupt and sometimes the manual setting */
static void dptc_int(unsigned long pmcr0)
{
int wp;
const union dvfs_dptc_voltage_table_entry *entry;
uint32_t sw1a, sw1advs, sw1bdvs, sw1bstby;
mutex_lock(&dptc_mutex);
int wp = dptc_wp;
while (1)
/* Mask DPTC interrupt and disable DPTC until the change request is
* serviced. */
CCM_PMCR0 = (pmcr0 & ~CCM_PMCR0_DPTEN) | CCM_PMCR0_PTVAIM;
switch (pmcr0 & CCM_PMCR0_PTVAI)
{
mutex_unlock(&dptc_mutex);
case CCM_PMCR0_PTVAI_DECREASE:
wp++;
dptc_nr_dn++;
break;
wakeup_wait(&dptc_wakeup, TIMEOUT_BLOCK);
case CCM_PMCR0_PTVAI_INCREASE:
wp--;
dptc_nr_up++;
break;
mutex_lock(&dptc_mutex);
if (!dptc_running)
continue;
wp = dptc_wp;
switch (dptc_int_data & CCM_PMCR0_PTVAI)
{
case CCM_PMCR0_PTVAI_DECREASE:
wp++;
dptc_nr_dn++;
break;
case CCM_PMCR0_PTVAI_INCREASE:
wp--;
dptc_nr_up++;
break;
case CCM_PMCR0_PTVAI_INCREASE_NOW:
case CCM_PMCR0_PTVAI_INCREASE_NOW:
if (--wp > DPTC_WP_PANIC)
wp = DPTC_WP_PANIC;
dptc_nr_pnc++;
break;
dptc_nr_pnc++;
break;
case CCM_PMCR0_PTVAI_NO_INT:
logf("DPTC: unexpected INT");
continue;
}
if (wp < 0)
{
wp = 0;
logf("DPTC: already @ highest (%d)", wp);
}
else if (wp >= DPTC_NUM_WP)
{
wp = DPTC_NUM_WP - 1;
logf("DPTC: already @ lowest (%d)", wp);
}
else
{
logf("DPTC: new wp (%d)", wp);
}
dptc_new_wp(wp);
enable_dptc();
case CCM_PMCR0_PTVAI_NO_INT:
break; /* Just maintain at global level */
}
if (wp < 0)
wp = 0;
else if (wp >= DPTC_NUM_WP)
wp = DPTC_NUM_WP - 1;
entry = &dvfs_dptc_voltage_table[wp];
sw1a = check_regulator_setting(entry->sw1a);
sw1advs = check_regulator_setting(entry->sw1advs);
sw1bdvs = check_regulator_setting(entry->sw1bdvs);
sw1bstby = check_regulator_setting(entry->sw1bstby);
dptc_regs_buf[0] = dptc_reg_shadows[0] |
sw1a << MC13783_SW1A_POS | /* SW1A */
sw1advs << MC13783_SW1ADVS_POS; /* SW1ADVS */
dptc_regs_buf[1] = dptc_reg_shadows[1] |
sw1bdvs << MC13783_SW1BDVS_POS | /* SW1BDVS */
sw1bstby << MC13783_SW1BSTBY_POS; /* SW1BSTBY */
dptc_wp = wp;
mc13783_write_async(&dptc_pmic_xfer, dptc_pmic_regs,
dptc_regs_buf, 2, dptc_transfer_done_callback);
}
/* Force the DPTC working point to 'wp' without stepping it up or down.
 * Reuses the interrupt service path so the PMIC regulators are programmed
 * through the same asynchronous write sequence. */
static void dptc_new_wp(unsigned int wp)
{
    dptc_wp = wp;
    /* "NO_INT" so the working point isn't incremented, just set. */
    dptc_int((CCM_PMCR0 & ~CCM_PMCR0_PTVAI) | CCM_PMCR0_PTVAI_NO_INT);
}
/* Interrupt vector for DPTC */
static __attribute__((interrupt("IRQ"))) void CCM_CLK_HANDLER(void)
{
/* Snapshot the interrupt cause */
unsigned long pmcr0 = CCM_PMCR0;
dptc_int_data = pmcr0;
/* Mask DPTC interrupt and disable DPTC until the change request is
* serviced. */
CCM_PMCR0 = (pmcr0 & ~CCM_PMCR0_DPTEN) | CCM_PMCR0_PTVAIM;
wakeup_signal(&dptc_wakeup);
dptc_int(CCM_PMCR0);
}
@ -524,23 +500,27 @@ static void dptc_init(void)
imx31_regmod32(&CCM_PMCR0, CCM_PMCR0_PTVAIM,
CCM_PMCR0_PTVAIM | CCM_PMCR0_DPTEN);
/* Shadow the regulator registers */
mc13783_read_regs(dptc_pmic_regs, dptc_reg_shadows, 2);
/* Pre-mask the fields we change */
dptc_reg_shadows[0] &= ~(MC13783_SW1A | MC13783_SW1ADVS);
dptc_reg_shadows[1] &= ~(MC13783_SW1BDVS | MC13783_SW1BSTBY);
/* Set default, safe working point. */
dptc_new_wp(DPTC_WP_DEFAULT);
/* Interrupt goes to MCU, specified reference circuits enabled when
* DPTC is active. */
imx31_regset32(&CCM_PMCR0, CCM_PMCR0_PTVIS | DPTC_DRCE_MASK);
imx31_regset32(&CCM_PMCR0, CCM_PMCR0_PTVIS);
imx31_regmod32(&CCM_PMCR0, DPTC_DRCE_MASK,
CCM_PMCR0_DRCE0 | CCM_PMCR0_DRCE1 |
CCM_PMCR0_DRCE2 | CCM_PMCR0_DRCE3);
/* DPTC counting range = 256 system clocks */
imx31_regclr32(&CCM_PMCR0, CCM_PMCR0_DCR);
/* Create PMIC regulator service. */
wakeup_init(&dptc_wakeup);
mutex_init(&dptc_mutex);
create_thread(dptc_interrupt_thread,
dptc_thread_stack, sizeof(dptc_thread_stack), 0,
dptc_thread_name IF_PRIO(, PRIORITY_REALTIME_1) IF_COP(, CPU));
logf("DPTC: Initialized");
}
@ -548,11 +528,7 @@ static void dptc_init(void)
/* Start DPTC module */
static void dptc_start(void)
{
int oldstate;
mutex_lock(&dptc_mutex);
oldstate = disable_irq_save();
int oldlevel = disable_irq_save();
if (!dptc_running)
{
@ -566,9 +542,7 @@ static void dptc_start(void)
enable_dptc();
}
restore_irq(oldstate);
mutex_unlock(&dptc_mutex);
restore_irq(oldlevel);
logf("DPTC: started");
}
@ -577,28 +551,20 @@ static void dptc_start(void)
/* Stop the DPTC hardware if running and go back to default working point */
static void dptc_stop(void)
{
int oldlevel;
mutex_lock(&dptc_mutex);
oldlevel = disable_irq_save();
int oldlevel = disable_irq_save();
if (dptc_running)
{
/* Disable DPTC and mask interrupt. */
CCM_PMCR0 = (CCM_PMCR0 & ~CCM_PMCR0_DPTEN) | CCM_PMCR0_PTVAIM;
avic_disable_int(INT_CCM_CLK);
dptc_int_data = 0;
dptc_running = false;
}
restore_irq(oldlevel);
/* Go back to default working point. */
dptc_new_wp(DPTC_WP_DEFAULT);
mutex_unlock(&dptc_mutex);
restore_irq(oldlevel);
logf("DPTC: stopped");
}
@ -618,10 +584,7 @@ void dvfs_dptc_init(void)
void dvfs_dptc_start(void)
{
dvfs_start();
if (0) /* Hold off for now */
{
dptc_start();
}
dptc_start();
}
@ -731,12 +694,10 @@ unsigned int dptc_get_wp(void)
/* If DPTC is not running, set the working point explicitly */
void dptc_set_wp(unsigned int wp)
{
mutex_lock(&dptc_mutex);
int oldlevel = disable_irq_save();
if (!dptc_running && wp < DPTC_NUM_WP)
{
dptc_new_wp(wp);
}
mutex_unlock(&dptc_mutex);
restore_irq(oldlevel);
}

View file

@ -71,7 +71,7 @@ unsigned short adc_read(int channel)
/* Read all 8 channels that are converted - two channels in each
* word. */
mc13783_read_regset(reg_array, channels[input_select], 4);
mc13783_read_regs(reg_array, channels[input_select], 4);
last_adc_read[input_select] = current_tick;
}

View file

@ -143,7 +143,7 @@ void _backlight_on(void)
data[1] |= backlight_pwm_bits;
/* Write regs within 30us of each other (requires single xfer) */
mc13783_write_regset(regs, data, 2);
mc13783_write_regs(regs, data, 2);
}
}

View file

@ -75,9 +75,6 @@
/* Define mask of which reference circuits are employed for DPTC */
#define DPTC_DRCE_MASK (CCM_PMCR0_DRCE1 | CCM_PMCR0_DRCE3)
/* When panicing, this working point is used */
#define DPTC_PANIC_WP
/* Due to a hardware bug in chip revisions < 2.0, when switching between
* Serial and MCU PLLs, DVFS forces the target PLL to go into reset and
* relock, only post divider frequency scaling is possible.

View file

@ -20,7 +20,6 @@
****************************************************************************/
#include "system.h"
#include "cpu.h"
#include "spi-imx31.h"
#include "gpio-imx31.h"
#include "mc13783.h"
#include "debug.h"
@ -29,9 +28,14 @@
extern const struct mc13783_event_list mc13783_event_list;
extern struct spi_node mc13783_spi;
/* PMIC event service data */
static int mc13783_thread_stack[DEFAULT_STACK_SIZE/sizeof(int)];
static const char *mc13783_thread_name = "pmic";
static struct wakeup mc13783_wake;
static struct wakeup mc13783_svc_wake;
/* Synchronous thread communication objects */
static struct mutex mc13783_spi_mutex;
static struct wakeup mc13783_spi_wake;
/* Tracking for which interrupts are enabled */
static uint32_t pmic_int_enabled[2] =
@ -45,6 +49,34 @@ static const unsigned char pmic_ints_regs[2] =
static volatile unsigned int mc13783_thread_id = 0;
static void mc13783_xfer_complete_cb(struct spi_transfer_desc *trans);
/* Transfer descriptor for synchronous reads and writes */
static struct spi_transfer_desc mc13783_transfer =
{
.node = &mc13783_spi,
.txbuf = NULL,
.rxbuf = NULL,
.count = 0,
.callback = mc13783_xfer_complete_cb,
.next = NULL,
};
/* Called when a transfer is finished and data is ready/written.
 * Runs from the SPI driver's completion context; a nonzero remaining
 * count indicates the transfer did not finish cleanly, so the waiter is
 * left to hit its timeout instead of being woken with bad data. */
static void mc13783_xfer_complete_cb(struct spi_transfer_desc *xfer)
{
    if (xfer->count != 0)
        return;

    wakeup_signal(&mc13783_spi_wake);
}
/* Block the calling thread until the shared synchronous transfer
 * descriptor completes (2-second timeout). Success requires both the
 * wakeup signal and a fully-drained word count on the descriptor. */
static inline bool wait_for_transfer_complete(void)
{
    return wakeup_wait(&mc13783_spi_wake, HZ*2) == OBJ_WAIT_SUCCEEDED &&
           mc13783_transfer.count == 0;
}
static void mc13783_interrupt_thread(void)
{
uint32_t pending[2];
@ -56,18 +88,18 @@ static void mc13783_interrupt_thread(void)
{
const struct mc13783_event *event, *event_last;
wakeup_wait(&mc13783_wake, TIMEOUT_BLOCK);
wakeup_wait(&mc13783_svc_wake, TIMEOUT_BLOCK);
if (mc13783_thread_id == 0)
break;
mc13783_read_regset(pmic_ints_regs, pending, 2);
mc13783_read_regs(pmic_ints_regs, pending, 2);
/* Only clear interrupts being dispatched */
pending[0] &= pmic_int_enabled[0];
pending[1] &= pmic_int_enabled[1];
mc13783_write_regset(pmic_ints_regs, pending, 2);
mc13783_write_regs(pmic_ints_regs, pending, 2);
/* Whatever is going to be serviced in this loop has been
* acknowledged. Reenable interrupt and if anything was still
@ -93,7 +125,7 @@ static void mc13783_interrupt_thread(void)
}
if ((pending[0] | pending[1]) == 0)
break; /* Teminate early if nothing more to service */
break; /* Terminate early if nothing more to service */
}
while (++event < event_last);
}
@ -107,13 +139,16 @@ void mc13783_event(void)
/* Mask the interrupt (unmasked when PMIC thread services it). */
imx31_regclr32(&MC13783_GPIO_IMR, 1ul << MC13783_GPIO_LINE);
MC13783_GPIO_ISR = (1ul << MC13783_GPIO_LINE);
wakeup_signal(&mc13783_wake);
wakeup_signal(&mc13783_svc_wake);
}
void mc13783_init(void)
{
/* Serial interface must have been initialized first! */
wakeup_init(&mc13783_wake);
wakeup_init(&mc13783_svc_wake);
mutex_init(&mc13783_spi_mutex);
wakeup_init(&mc13783_spi_wake);
/* Enable the PMIC SPI module */
spi_enable_module(&mc13783_spi);
@ -139,8 +174,9 @@ void mc13783_close(void)
return;
mc13783_thread_id = 0;
wakeup_signal(&mc13783_wake);
wakeup_signal(&mc13783_svc_wake);
thread_wait(thread_id);
spi_disable_module(&mc13783_spi);
}
bool mc13783_enable_event(enum mc13783_event_ids id)
@ -150,12 +186,12 @@ bool mc13783_enable_event(enum mc13783_event_ids id)
int set = event->set;
uint32_t mask = event->mask;
spi_lock(&mc13783_spi);
mutex_lock(&mc13783_spi_mutex);
pmic_int_enabled[set] |= mask;
mc13783_clear(pmic_intm_regs[set], mask);
spi_unlock(&mc13783_spi);
mutex_unlock(&mc13783_spi_mutex);
return true;
}
@ -167,66 +203,77 @@ void mc13783_disable_event(enum mc13783_event_ids id)
int set = event->set;
uint32_t mask = event->mask;
spi_lock(&mc13783_spi);
mutex_lock(&mc13783_spi_mutex);
pmic_int_enabled[set] &= ~mask;
mc13783_set(pmic_intm_regs[set], mask);
spi_unlock(&mc13783_spi);
mutex_unlock(&mc13783_spi_mutex);
}
uint32_t mc13783_set(unsigned address, uint32_t bits)
{
spi_lock(&mc13783_spi);
uint32_t data;
uint32_t data = mc13783_read(address);
mutex_lock(&mc13783_spi_mutex);
data = mc13783_read(address);
if (data != MC13783_DATA_ERROR)
mc13783_write(address, data | bits);
spi_unlock(&mc13783_spi);
mutex_unlock(&mc13783_spi_mutex);
return data;
}
uint32_t mc13783_clear(unsigned address, uint32_t bits)
{
spi_lock(&mc13783_spi);
uint32_t data;
uint32_t data = mc13783_read(address);
mutex_lock(&mc13783_spi_mutex);
data = mc13783_read(address);
if (data != MC13783_DATA_ERROR)
mc13783_write(address, data & ~bits);
spi_unlock(&mc13783_spi);
mutex_unlock(&mc13783_spi_mutex);
return data;
}
int mc13783_write(unsigned address, uint32_t data)
{
struct spi_transfer xfer;
uint32_t packet;
int i;
if (address >= MC13783_NUM_REGS)
return -1;
packet = (1 << 31) | (address << 25) | (data & 0xffffff);
xfer.txbuf = &packet;
xfer.rxbuf = &packet;
xfer.count = 1;
if (!spi_transfer(&mc13783_spi, &xfer))
return -1;
mutex_lock(&mc13783_spi_mutex);
return 1 - xfer.count;
mc13783_transfer.txbuf = &packet;
mc13783_transfer.rxbuf = NULL;
mc13783_transfer.count = 1;
i = -1;
if (spi_transfer(&mc13783_transfer) && wait_for_transfer_complete())
i = 1 - mc13783_transfer.count;
mutex_unlock(&mc13783_spi_mutex);
return i;
}
uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask)
{
uint32_t old;
spi_lock(&mc13783_spi);
mutex_lock(&mc13783_spi_mutex);
old = mc13783_read(address);
@ -238,69 +285,38 @@ uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask)
old = MC13783_DATA_ERROR;
}
spi_unlock(&mc13783_spi);
mutex_unlock(&mc13783_spi_mutex);
return old;
}
int mc13783_write_regset(const unsigned char *regs, const uint32_t *data,
int count)
{
int i;
struct spi_transfer xfer;
uint32_t packets[MC13783_NUM_REGS];
if ((unsigned)count > MC13783_NUM_REGS)
return -1;
for (i = 0; i < count; i++)
{
uint32_t reg = regs[i];
if (reg >= MC13783_NUM_REGS)
return -1;
packets[i] = (1 << 31) | (reg << 25) | (data[i] & 0xffffff);
}
xfer.txbuf = packets;
xfer.rxbuf = packets;
xfer.count = count;
if (!spi_transfer(&mc13783_spi, &xfer))
return -1;
return count - xfer.count;
}
uint32_t mc13783_read(unsigned address)
{
uint32_t packet;
struct spi_transfer xfer;
if (address >= MC13783_NUM_REGS)
return MC13783_DATA_ERROR;
packet = address << 25;
xfer.txbuf = &packet;
xfer.rxbuf = &packet;
xfer.count = 1;
mutex_lock(&mc13783_spi_mutex);
if (!spi_transfer(&mc13783_spi, &xfer))
return MC13783_DATA_ERROR;
mc13783_transfer.txbuf = &packet;
mc13783_transfer.rxbuf = &packet;
mc13783_transfer.count = 1;
if (!spi_transfer(&mc13783_transfer) || !wait_for_transfer_complete())
packet = MC13783_DATA_ERROR;
mutex_unlock(&mc13783_spi_mutex);
return packet;
}
int mc13783_read_regset(const unsigned char *regs, uint32_t *buffer,
int count)
int mc13783_read_regs(const unsigned char *regs, uint32_t *buffer,
int count)
{
int i;
struct spi_transfer xfer;
if ((unsigned)count > MC13783_NUM_REGS)
return -1;
for (i = 0; i < count; i++)
{
@ -312,12 +328,101 @@ int mc13783_read_regset(const unsigned char *regs, uint32_t *buffer,
buffer[i] = reg << 25;
}
xfer.txbuf = buffer;
xfer.rxbuf = buffer;
xfer.count = count;
mutex_lock(&mc13783_spi_mutex);
if (!spi_transfer(&mc13783_spi, &xfer))
return -1;
mc13783_transfer.txbuf = buffer;
mc13783_transfer.rxbuf = buffer;
mc13783_transfer.count = count;
return count - xfer.count;
i = -1;
if (spi_transfer(&mc13783_transfer) && wait_for_transfer_complete())
i = count - mc13783_transfer.count;
mutex_unlock(&mc13783_spi_mutex);
return i;
}
/* Synchronously write a batch of PMIC registers in a single SPI transfer.
 *
 * regs:   register subaddresses to write, one per element
 * buffer: data for each register; used in place as packet workspace, so
 *         the caller's values are clobbered with raw SPI write packets
 * count:  number of registers in the batch
 *
 * Returns the number of registers actually written, or -1 on error
 * (bad register number, queue failure, or transfer timeout). */
int mc13783_write_regs(const unsigned char *regs, uint32_t *buffer,
                       int count)
{
    int i;

    /* Build a write packet for each register in the caller's buffer. */
    for (i = 0; i < count; i++)
    {
        unsigned reg = regs[i];

        if (reg >= MC13783_NUM_REGS)
            return -1;

        /* Use 1ul: a plain '1 << 31' shifts into the sign bit of a
         * 32-bit int, which is undefined behavior in C. */
        buffer[i] = (1ul << 31) | (reg << 25) | (buffer[i] & 0xffffff);
    }

    mutex_lock(&mc13783_spi_mutex);

    mc13783_transfer.txbuf = buffer;
    mc13783_transfer.rxbuf = NULL; /* Write-only; discard received words */
    mc13783_transfer.count = count;

    i = -1;

    if (spi_transfer(&mc13783_transfer) && wait_for_transfer_complete())
        i = count - mc13783_transfer.count; /* Words actually sent */

    mutex_unlock(&mc13783_spi_mutex);

    return i;
}
#if 0 /* Not needed right now */
/* Queue an asynchronous batch read of PMIC registers.
 *
 * xfer:     caller-owned transfer descriptor (must not be busy)
 * regs:     register subaddresses to read
 * buffer:   receives one word per register; also used in place as packet
 *           workspace for the outgoing read commands
 * count:    number of registers
 * callback: invoked from the SPI driver when the transfer finishes
 *
 * Returns true if the transfer was queued, false on a bad register
 * number or queue failure. */
bool mc13783_read_async(struct spi_transfer_desc *xfer,
                        const unsigned char *regs, uint32_t *buffer,
                        int count, spi_transfer_cb_fn_type callback)
{
    int i;

    /* Build a read command (address only, R/W bit clear) per register. */
    for (i = 0; i < count; i++)
    {
        unsigned reg = regs[i];

        if (reg >= MC13783_NUM_REGS)
            return false;

        buffer[i] = reg << 25;
    }

    xfer->node = &mc13783_spi;
    xfer->txbuf = buffer;
    xfer->rxbuf = buffer; /* Replies land over the command words */
    xfer->count = count;
    xfer->callback = callback;

    return spi_transfer(xfer);
}
#endif
/* Queue an asynchronous batch write of PMIC registers.
 *
 * xfer:     caller-owned transfer descriptor (must not be busy)
 * regs:     register subaddresses to write
 * buffer:   data for each register; used in place as packet workspace,
 *           so the caller's values are clobbered with raw write packets
 * count:    number of registers
 * callback: invoked from the SPI driver when the transfer finishes
 *
 * Returns true if the transfer was queued, false on a bad register
 * number or queue failure. Safe to call from interrupt context since
 * spi_transfer() only queues work. */
bool mc13783_write_async(struct spi_transfer_desc *xfer,
                         const unsigned char *regs, uint32_t *buffer,
                         int count, spi_transfer_cb_fn_type callback)
{
    int i;

    for (i = 0; i < count; i++)
    {
        unsigned reg = regs[i];

        if (reg >= MC13783_NUM_REGS)
            return false;

        /* Use 1ul: a plain '1 << 31' shifts into the sign bit of a
         * 32-bit int, which is undefined behavior in C. */
        buffer[i] = (1ul << 31) | (reg << 25) | (buffer[i] & 0xffffff);
    }

    xfer->node = &mc13783_spi;
    xfer->txbuf = buffer;
    xfer->rxbuf = NULL; /* Write-only; nothing to receive */
    xfer->count = count;
    xfer->callback = callback;

    return spi_transfer(xfer);
}

View file

@ -38,19 +38,18 @@ static __attribute__((interrupt("IRQ"))) void CSPI3_HANDLER(void);
#endif
/* State data associatated with each CSPI module */
static struct spi_module_descriptor
static struct spi_module_desc
{
struct cspi_map * const base;
int enab;
struct spi_node *last;
enum IMX31_CG_LIST cg;
enum IMX31_INT_LIST ints;
int byte_size;
void (*handler)(void);
struct mutex m;
struct wakeup w;
struct spi_transfer *trans;
int rxcount;
struct cspi_map * const base; /* CSPI module address */
struct spi_transfer_desc *head; /* Running job */
struct spi_transfer_desc *tail; /* Most recent job added */
const struct spi_node *last_node; /* Last node used for module */
void (*handler)(void); /* Interrupt handler */
int rxcount; /* Independent copy of txcount */
int8_t enab; /* Enable count */
int8_t byte_size; /* Size of transfers in bytes */
int8_t cg; /* Clock-gating value */
int8_t ints; /* AVIC vector number */
} spi_descs[SPI_NUM_CSPI] =
/* Init non-zero members */
{
@ -80,93 +79,224 @@ static struct spi_module_descriptor
#endif
};
/* Reset the module.
 * On the i.MX31 CSPI, clearing the enable bit resets the block (FIFOs
 * included), so "reset" is simply leaving the module disabled. */
static void spi_reset(struct spi_module_desc * const desc)
{
    /* Reset by leaving it disabled */
    struct cspi_map * const base = desc->base;
    base->conreg &= ~CSPI_CONREG_EN;
}
/* Write the context for the node and remember it to avoid unneeded
 * reconfigure. Returns false if the module is not enabled; returns true
 * immediately if the module is already configured for this node. */
static bool spi_set_context(struct spi_module_desc *desc,
                            struct spi_transfer_desc *xfer)
{
    const struct spi_node * const node = xfer->node;
    struct cspi_map * const base = desc->base;

    if (desc->enab == 0)
        return false;

    if (node == desc->last_node)
        return true;

    /* Errata says CSPI should be disabled when writing PERIODREG. */
    base->conreg &= ~CSPI_CONREG_EN;

    /* Switch the module's node */
    desc->last_node = node;
    /* Transfer width in bytes - 1, derived from the BIT COUNT field of
     * the node's conreg setup (bits 12:8, value = bits per word - 1). */
    desc->byte_size = (((node->conreg >> 8) & 0x1f) + 1 + 7) / 8 - 1;

    /* Set the wait-states */
    base->periodreg = node->periodreg & 0xffff;

    /* Keep reserved and start bits cleared. Keep enabled bit. */
    base->conreg =
        (node->conreg & ~(0xfcc8e000 | CSPI_CONREG_XCH | CSPI_CONREG_SMC));

    return true;
}
/* Fill the TX fifo. Returns the number of remaining words.
 * Packs 1-4 bytes from the transmit buffer into each FIFO word
 * (little-endian assembly) and advances the buffer until either the
 * FIFO is full (TF set) or the transfer runs out of words. */
static int tx_fill_fifo(struct spi_module_desc * const desc,
                        struct cspi_map * const base,
                        struct spi_transfer_desc * const xfer)
{
    int count = xfer->count;
    int size = desc->byte_size; /* bytes per word - 1 */

    while ((base->statreg & CSPI_STATREG_TF) == 0)
    {
        uint32_t word = 0;

        /* Intentional fallthrough: each case adds one more byte. */
        switch (size & 3)
        {
        case 3:
            word = *(unsigned char *)(xfer->txbuf + 3) << 24;
            /* fallthrough */
        case 2:
            word |= *(unsigned char *)(xfer->txbuf + 2) << 16;
            /* fallthrough */
        case 1:
            word |= *(unsigned char *)(xfer->txbuf + 1) << 8;
            /* fallthrough */
        case 0:
            word |= *(unsigned char *)(xfer->txbuf + 0);
        }

        xfer->txbuf += size + 1; /* Increment buffer */
        base->txdata = word;     /* Write to FIFO */

        if (--count == 0)
            break;
    }

    xfer->count = count;
    return count;
}
/* Start a transfer on the SPI.
 * Configures the module for the transfer's node, primes the TX FIFO,
 * selects interrupt sources based on transfer length, and kicks off the
 * exchange. Returns false if the module could not be configured. */
static bool start_transfer(struct spi_module_desc * const desc,
                           struct spi_transfer_desc * const xfer)
{
    struct cspi_map * const base = desc->base;
    unsigned long intreg;

    if (!spi_set_context(desc, xfer))
        return false;

    base->conreg |= CSPI_CONREG_EN; /* Enable module */

    /* RX word count tracked separately from the TX count in 'xfer'. */
    desc->rxcount = xfer->count;

    intreg = (xfer->count < 8) ?
        CSPI_INTREG_TCEN : /* Trans. complete: TX will run out in prefill */
        CSPI_INTREG_THEN;  /* INT when TX half-empty */

    intreg |= (xfer->count < 4) ?
        CSPI_INTREG_RREN : /* Must grab data on every word */
        CSPI_INTREG_RHEN;  /* Enough data to wait for half-full */

    /* Prefill before unmasking so the first interrupt finds work done. */
    tx_fill_fifo(desc, base, xfer);

    base->statreg = CSPI_STATREG_TC; /* Ack 'complete' */
    base->intreg = intreg;           /* Enable interrupts */
    base->conreg |= CSPI_CONREG_XCH; /* Begin transfer */

    return true;
}
/* Common code for interrupt handlers */
static void spi_interrupt(enum spi_module_number spi)
{
struct spi_module_descriptor *desc = &spi_descs[spi];
struct spi_module_desc *desc = &spi_descs[spi];
struct cspi_map * const base = desc->base;
struct spi_transfer *trans = desc->trans;
unsigned long intreg = base->intreg;
struct spi_transfer_desc *xfer = desc->head;
int inc = desc->byte_size + 1;
if (desc->rxcount > 0)
/* Data received - empty out RXFIFO */
while ((base->statreg & CSPI_STATREG_RR) != 0)
{
/* Data received - empty out RXFIFO */
while ((base->statreg & CSPI_STATREG_RR) != 0)
{
uint32_t word = base->rxdata;
uint32_t word = base->rxdata;
if (desc->rxcount <= 0)
continue;
if (xfer->rxbuf != NULL)
{
/* There is a receive buffer */
switch (desc->byte_size & 3)
{
case 3:
*(unsigned char *)(trans->rxbuf + 3) = word >> 24;
*(unsigned char *)(xfer->rxbuf + 3) = word >> 24;
case 2:
*(unsigned char *)(trans->rxbuf + 2) = word >> 16;
*(unsigned char *)(xfer->rxbuf + 2) = word >> 16;
case 1:
*(unsigned char *)(trans->rxbuf + 1) = word >> 8;
*(unsigned char *)(xfer->rxbuf + 1) = word >> 8;
case 0:
*(unsigned char *)(trans->rxbuf + 0) = word;
*(unsigned char *)(xfer->rxbuf + 0) = word;
}
trans->rxbuf += inc;
xfer->rxbuf += inc;
}
if (--desc->rxcount < 4)
if (--desc->rxcount < 4)
{
if (desc->rxcount == 0)
{
unsigned long intreg = base->intreg;
if (desc->rxcount <= 0)
{
/* No more to receive - stop RX interrupts */
intreg &= ~(CSPI_INTREG_RHEN | CSPI_INTREG_RREN);
base->intreg = intreg;
break;
}
else if (!(intreg & CSPI_INTREG_RREN))
{
/* < 4 words expected - switch to RX ready */
intreg &= ~CSPI_INTREG_RHEN;
base->intreg = intreg | CSPI_INTREG_RREN;
}
/* No more to receive - stop RX interrupts */
intreg &= ~(CSPI_INTREG_RHEN | CSPI_INTREG_RREN);
base->intreg = intreg;
}
else if (intreg & CSPI_INTREG_RHEN)
{
/* < 4 words expected - switch to RX ready */
intreg &= ~CSPI_INTREG_RHEN;
intreg |= CSPI_INTREG_RREN;
base->intreg = intreg;
}
}
}
if (trans->count > 0)
if (xfer->count > 0)
{
/* Data to transmit - fill TXFIFO or write until exhausted */
while ((base->statreg & CSPI_STATREG_TF) == 0)
{
uint32_t word = 0;
/* Data to transmit - fill TXFIFO or write until exhausted. */
if (tx_fill_fifo(desc, base, xfer) != 0)
return;
switch (desc->byte_size & 3)
{
case 3:
word = *(unsigned char *)(trans->txbuf + 3) << 24;
case 2:
word |= *(unsigned char *)(trans->txbuf + 2) << 16;
case 1:
word |= *(unsigned char *)(trans->txbuf + 1) << 8;
case 0:
word |= *(unsigned char *)(trans->txbuf + 0);
}
trans->txbuf += inc;
base->txdata = word;
if (--trans->count <= 0)
{
/* Out of data - stop TX interrupts */
base->intreg &= ~CSPI_INTREG_THEN;
break;
}
}
/* Out of data - stop TX interrupts, enable TC interrupt. */
intreg &= ~CSPI_INTREG_THEN;
intreg |= CSPI_INTREG_TCEN;
base->intreg = intreg;
}
/* If all interrupts have been remasked - we're done */
if (base->intreg == 0)
if ((intreg & CSPI_INTREG_TCEN) && (base->statreg & CSPI_STATREG_TC))
{
base->statreg = CSPI_STATREG_TC | CSPI_STATREG_BO;
wakeup_signal(&desc->w);
/* Outbound transfer is complete. */
intreg &= ~CSPI_INTREG_TCEN;
base->intreg = intreg;
base->statreg = CSPI_STATREG_TC; /* Ack 'complete' */
}
if (intreg != 0)
return;
/* All interrupts are masked; we're done with current transfer. */
for (;;)
{
struct spi_transfer_desc *next = xfer->next;
spi_transfer_cb_fn_type callback = xfer->callback;
xfer->next = NULL;
base->conreg &= ~CSPI_CONREG_EN; /* Disable module */
if (next == xfer)
{
/* Last job on queue */
desc->head = NULL;
if (callback != NULL)
callback(xfer);
/* Callback may have restarted transfers. */
}
else
{
/* Queue next job. */
desc->head = next;
if (callback != NULL)
callback(xfer);
if (!start_transfer(desc, next))
{
xfer = next;
xfer->count = -1;
continue; /* Failed: try next */
}
}
break;
}
}
@ -192,105 +322,50 @@ static __attribute__((interrupt("IRQ"))) void CSPI3_HANDLER(void)
}
#endif
/* Write the context for the node and remember it to avoid unneeded reconfigure */
static bool spi_set_context(struct spi_node *node,
struct spi_module_descriptor *desc)
{
struct cspi_map * const base = desc->base;
if ((base->conreg & CSPI_CONREG_EN) == 0)
return false;
if (node != desc->last)
{
/* Switch the module's node */
desc->last = node;
desc->byte_size = (((node->conreg >> 8) & 0x1f) + 1 + 7) / 8 - 1;
/* Keep reserved and start bits cleared. Keep enabled bit. */
base->conreg =
(node->conreg & ~(0xfcc8e000 | CSPI_CONREG_XCH | CSPI_CONREG_SMC))
| CSPI_CONREG_EN;
/* Set the wait-states */
base->periodreg = node->periodreg & 0xffff;
/* Clear out any spuriously-pending interrupts */
base->statreg = CSPI_STATREG_TC | CSPI_STATREG_BO;
}
return true;
}
static void spi_reset(struct cspi_map * const base)
{
/* Reset */
base->conreg &= ~CSPI_CONREG_EN;
base->conreg |= CSPI_CONREG_EN;
base->intreg = 0;
base->statreg = CSPI_STATREG_TC | CSPI_STATREG_BO;
}
/* Initialize each of the used SPI descriptors */
/* Initialize the SPI driver */
void spi_init(void)
{
int i;
unsigned i;
for (i = 0; i < SPI_NUM_CSPI; i++)
{
struct spi_module_descriptor * const desc = &spi_descs[i];
mutex_init(&desc->m);
wakeup_init(&desc->w);
struct spi_module_desc * const desc = &spi_descs[i];
ccm_module_clock_gating(desc->cg, CGM_ON_RUN_WAIT);
spi_reset(desc);
ccm_module_clock_gating(desc->cg, CGM_OFF);
}
}
/* Get mutually-exclusive access to the node */
void spi_lock(struct spi_node *node)
{
mutex_lock(&spi_descs[node->num].m);
}
/* Release mutual exclusion */
void spi_unlock(struct spi_node *node)
{
mutex_unlock(&spi_descs[node->num].m);
}
/* Enable the specified module for the node */
void spi_enable_module(struct spi_node *node)
void spi_enable_module(const struct spi_node *node)
{
struct spi_module_descriptor * const desc = &spi_descs[node->num];
mutex_lock(&desc->m);
struct spi_module_desc * const desc = &spi_descs[node->num];
if (++desc->enab == 1)
{
/* First enable for this module */
struct cspi_map * const base = desc->base;
/* Enable clock-gating register */
ccm_module_clock_gating(desc->cg, CGM_ON_RUN_WAIT);
/* Reset */
spi_reset(base);
desc->last = NULL;
spi_reset(desc);
desc->last_node = NULL;
/* Enable interrupt at controller level */
avic_enable_int(desc->ints, INT_TYPE_IRQ, INT_PRIO_DEFAULT,
desc->handler);
}
mutex_unlock(&desc->m);
}
/* Disabled the specified module for the node */
void spi_disable_module(struct spi_node *node)
/* Disable the specified module for the node */
void spi_disable_module(const struct spi_node *node)
{
struct spi_module_descriptor * const desc = &spi_descs[node->num];
mutex_lock(&desc->m);
struct spi_module_desc * const desc = &spi_descs[node->num];
if (desc->enab > 0 && --desc->enab == 0)
{
/* Last enable for this module */
struct cspi_map * const base = desc->base;
/* Wait for outstanding transactions */
while (*(void ** volatile)&desc->head != NULL);
/* Disable interrupt at controller level */
avic_disable_int(desc->ints);
@ -300,53 +375,57 @@ void spi_disable_module(struct spi_node *node)
/* Disable interface clock */
ccm_module_clock_gating(desc->cg, CGM_OFF);
}
mutex_unlock(&desc->m);
}
/* Send and/or receive data on the specified node */
int spi_transfer(struct spi_node *node, struct spi_transfer *trans)
bool spi_transfer(struct spi_transfer_desc *xfer)
{
struct spi_module_descriptor * const desc = &spi_descs[node->num];
int retval;
bool retval;
struct spi_module_desc * desc;
int oldlevel;
if (trans->count <= 0)
return true;
if (xfer->count == 0)
return true; /* No data? No problem. */
mutex_lock(&desc->m);
retval = spi_set_context(node, desc);
if (retval)
if (xfer->count < 0 || xfer->next != NULL || xfer->node == NULL)
{
struct cspi_map * const base = desc->base;
unsigned long intreg;
desc->trans = trans;
desc->rxcount = trans->count;
/* Enable needed interrupts - FIFOs will start filling */
intreg = CSPI_INTREG_THEN;
intreg |= (trans->count < 4) ?
CSPI_INTREG_RREN : /* Must grab data on every word */
CSPI_INTREG_RHEN; /* Enough data to wait for half-full */
base->intreg = intreg;
/* Start transfer */
base->conreg |= CSPI_CONREG_XCH;
if (wakeup_wait(&desc->w, HZ) != OBJ_WAIT_SUCCEEDED)
{
base->intreg = 0; /* Stop SPI ints */
spi_reset(base); /* Reset module (esp. to empty FIFOs) */
desc->last = NULL; /* Force reconfigure */
retval = false;
}
/* Can't pass a busy descriptor, requires a node and negative size
* is invalid to pass. */
return false;
}
mutex_unlock(&desc->m);
oldlevel = disable_irq_save();
desc = &spi_descs[xfer->node->num];
if (desc->head == NULL)
{
/* No transfers in progress; start interface. */
retval = start_transfer(desc, xfer);
if (retval)
{
/* Start ok: actually put it in the queue. */
desc->head = xfer;
desc->tail = xfer;
xfer->next = xfer; /* First, self-reference terminate */
}
else
{
xfer->count = -1; /* Signal error */
}
}
else
{
/* Already running: simply add to end and the final INT on the
* running transfer will pick it up. */
desc->tail->next = xfer; /* Add to tail */
desc->tail = xfer; /* New tail */
xfer->next = xfer; /* Self-reference terminate */
retval = true;
}
restore_irq(oldlevel);
return retval;
}

View file

@ -61,29 +61,39 @@ struct spi_node
unsigned long periodreg; /* CSPI periodreg setup */
};
struct spi_transfer
struct spi_transfer_desc;
typedef void (*spi_transfer_cb_fn_type)(struct spi_transfer_desc *);
struct spi_transfer_desc
{
const void *txbuf;
void *rxbuf;
int count;
const struct spi_node *node; /* node for this transfer */
const void *txbuf; /* buffer to transmit */
void *rxbuf; /* buffer to receive */
int count; /* number of elements */
spi_transfer_cb_fn_type callback; /* function to call when done */
struct spi_transfer_desc *next; /* next transfer queued,
spi layer sets this */
};
/* NOTE: SPI updates the descrptor during the operation. Do not write
* to it until completion notification is received. If no callback is
* specified, the caller must find a way to ensure integrity.
*
* -1 will be written to 'count' if an error occurs, otherwise it will
* be zero when completed.
*/
/* One-time init of SPI driver */
void spi_init(void);
/* Enable the specified module for the node */
void spi_enable_module(struct spi_node *node);
void spi_enable_module(const struct spi_node *node);
/* Disabled the specified module for the node */
void spi_disable_module(struct spi_node *node);
void spi_disable_module(const struct spi_node *node);
/* Lock module mutex */
void spi_lock(struct spi_node *node);
/* Unlock module mutex */
void spi_unlock(struct spi_node *node);
/* Send and/or receive data on the specified node */
int spi_transfer(struct spi_node *node, struct spi_transfer *trans);
/* Send and/or receive data on the specified node (asychronous) */
bool spi_transfer(struct spi_transfer_desc *xfer);
#endif /* SPI_IMX31_H */