msm: dma: Convert spinlocks to mutexes for new clock apis

Converting the DMA driver to mutexes is a requirement for using the new
clk_prepare and clk_unprepare APIs. In order to deal with callers who
run in atomic context, refactor the enqueuing code to run in a
workqueue. Introduce a new spinlock that protects only the command
lists, not the hardware, so the enqueue routine can add to the lists
from atomic context.

Change-Id: I4f2ecdb8b6d2a25e6069bf9da61f9cf9d5c9e2b0
Signed-off-by: Jeff Ohlstein <johlstei@codeaurora.org>
Authored by Jeff Ohlstein on 2012-06-28 19:42:03 -07:00; committed by Stephen Boyd
parent 97cdeb84bf
commit ee5a467055
2 changed files with 146 additions and 69 deletions
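
The pattern at the heart of this change, in miniature: callers that may be in
atomic context only stage a command on a list under a spinlock and kick a work
item; the work item runs in process context, where it can take a mutex and call
the sleeping clk_prepare/clk_unprepare APIs before touching the hardware. The
sketch below is illustrative only; the foo_* names, the single staged list, and
the direct clk_prepare_enable() call are hypothetical, not the driver's actual
symbols.

/*
 * Minimal sketch of the spinlock-for-lists / mutex-for-hardware split.
 * All identifiers (foo_*, g) are illustrative, not from the driver.
 */
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct foo_cmd {
	struct list_head list;
	struct work_struct work;
};

static struct {
	spinlock_t list_lock;		/* protects only the staged list */
	struct mutex lock;		/* protects clocks and hardware */
	struct list_head staged;
	struct clk *clk;
	struct workqueue_struct *wq;
} g;

/* Process context: taking a mutex and sleeping in clk_prepare_enable() is OK */
static void foo_cmd_work(struct work_struct *work)
{
	struct foo_cmd *cmd = container_of(work, struct foo_cmd, work);
	unsigned long flags;

	mutex_lock(&g.lock);
	if (clk_prepare_enable(g.clk)) {	/* may sleep */
		mutex_unlock(&g.lock);
		return;
	}

	/* list manipulation stays under the spinlock, even here */
	spin_lock_irqsave(&g.list_lock, flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&g.list_lock, flags);

	/* ... program the hardware with cmd ... */

	clk_disable_unprepare(g.clk);
	mutex_unlock(&g.lock);
}

/* Safe from atomic context: list work under a spinlock, then queue_work() */
static void foo_enqueue(struct foo_cmd *cmd)
{
	unsigned long flags;

	INIT_WORK(&cmd->work, foo_cmd_work);
	spin_lock_irqsave(&g.list_lock, flags);
	list_add_tail(&cmd->list, &g.staged);
	spin_unlock_irqrestore(&g.list_lock, flags);
	queue_work(g.wq, &cmd->work);
}

In the patch itself this maps onto dmov_conf[adm].list_lock guarding the
staged/ready/active command lists, dmov_conf[adm].lock guarding the clocks and
ADM registers, and one ordered workqueue per ADM so commands are issued in
enqueue order.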


@@ -59,18 +59,21 @@ struct msm_dmov_conf {
 	int channel_active;
 	int sd;
 	size_t sd_size;
+	struct list_head staged_commands[MSM_DMOV_CHANNEL_COUNT];
 	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
 	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
-	spinlock_t lock;
+	struct mutex lock;
+	spinlock_t list_lock;
 	unsigned int irq;
 	struct clk *clk;
 	struct clk *pclk;
 	struct clk *ebiclk;
 	unsigned int clk_ctl;
-	struct timer_list timer;
+	struct delayed_work work;
+	struct workqueue_struct *cmd_wq;
 };
 
-static void msm_dmov_clock_timer(unsigned long);
+static void msm_dmov_clock_work(struct work_struct *);
 static int msm_dmov_clk_toggle(int, int);
 
 #ifdef CONFIG_ARCH_MSM8X60
@@ -163,15 +166,19 @@ static struct msm_dmov_conf dmov_conf[] = {
 	{
 		.crci_conf = adm0_crci_conf,
 		.chan_conf = adm0_chan_conf,
-		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
+		.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
+		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
 		.clk_ctl = CLK_DIS,
-		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
+		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
+				msm_dmov_clock_work),
 	}, {
 		.crci_conf = adm1_crci_conf,
 		.chan_conf = adm1_chan_conf,
-		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
+		.lock = __MUTEX_INITIALIZER(dmov_conf[1].lock),
+		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
 		.clk_ctl = CLK_DIS,
-		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 1),
+		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[1].work,
+				msm_dmov_clock_work),
 	}
 };
 #else
@@ -179,9 +186,11 @@ static struct msm_dmov_conf dmov_conf[] = {
 	{
 		.crci_conf = NULL,
 		.chan_conf = NULL,
-		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
+		.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
+		.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
 		.clk_ctl = CLK_DIS,
-		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
+		.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
+				msm_dmov_clock_work),
 	}
 };
 #endif
@@ -257,60 +266,123 @@ err:
 	return ret;
 }
 
-static void msm_dmov_clock_timer(unsigned long adm)
+static void msm_dmov_clock_work(struct work_struct *work)
 {
-	unsigned long irq_flags;
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
-	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
-		BUG_ON(dmov_conf[adm].channel_active);
+	struct msm_dmov_conf *conf =
+		container_of(to_delayed_work(work), struct msm_dmov_conf, work);
+	int adm = DMOV_IRQ_TO_ADM(conf->irq);
+
+	mutex_lock(&conf->lock);
+	if (conf->clk_ctl == CLK_TO_BE_DIS) {
+		BUG_ON(conf->channel_active);
 		msm_dmov_clk_toggle(adm, 0);
-		dmov_conf[adm].clk_ctl = CLK_DIS;
+		conf->clk_ctl = CLK_DIS;
 	}
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&conf->lock);
 }
 
-void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
+enum {
+	NOFLUSH = 0,
+	GRACEFUL,
+	NONGRACEFUL,
+};
+
+/* Caller must hold the list lock */
+static struct msm_dmov_cmd *start_ready_cmd(unsigned ch, int adm)
+{
+	struct msm_dmov_cmd *cmd;
+
+	if (list_empty(&dmov_conf[adm].ready_commands[ch]))
+		return NULL;
+	cmd = list_entry(dmov_conf[adm].ready_commands[ch].next, typeof(*cmd),
+			 list);
+	list_del(&cmd->list);
+	if (cmd->exec_func)
+		cmd->exec_func(cmd);
+	list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
+	if (!dmov_conf[adm].channel_active)
+		enable_irq(dmov_conf[adm].irq);
+	dmov_conf[adm].channel_active |= BIT(ch);
+	PRINT_IO("msm dmov enqueue command, %x, ch %d\n", cmd->cmdptr, ch);
+	writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
+
+	return cmd;
+}
+
+static void msm_dmov_enqueue_cmd_ext_work(struct work_struct *work)
 {
-	unsigned long irq_flags;
-	unsigned int status;
+	struct msm_dmov_cmd *cmd =
+		container_of(work, struct msm_dmov_cmd, work);
+	unsigned id = cmd->id;
+	unsigned status;
+	unsigned long flags;
 	int adm = DMOV_ID_TO_ADM(id);
 	int ch = DMOV_ID_TO_CHAN(id);
 
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	mutex_lock(&dmov_conf[adm].lock);
 	if (dmov_conf[adm].clk_ctl == CLK_DIS) {
 		status = msm_dmov_clk_toggle(adm, 1);
 		if (status != 0)
 			goto error;
-	} else if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS)
-		del_timer(&dmov_conf[adm].timer);
+	}
 	dmov_conf[adm].clk_ctl = CLK_EN;
 
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
+
+	cmd = list_entry(dmov_conf[adm].staged_commands[ch].next, typeof(*cmd),
+			 list);
+	list_del(&cmd->list);
+	list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
 	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
 	if (status & DMOV_STATUS_CMD_PTR_RDY) {
 		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
 			id, status);
-		if (cmd->exec_func)
-			cmd->exec_func(cmd);
-		list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
-		if (!dmov_conf[adm].channel_active)
-			enable_irq(dmov_conf[adm].irq);
-		dmov_conf[adm].channel_active |= 1U << ch;
-		PRINT_IO("Writing %x exactly to register", cmd->cmdptr);
-		writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
-	} else {
-		if (!dmov_conf[adm].channel_active) {
-			dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
-			mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
-		}
-		if (list_empty(&dmov_conf[adm].active_commands[ch]))
+		cmd = start_ready_cmd(ch, adm);
+		/*
+		 * We added something to the ready list, and still hold the
+		 * list lock. Thus, no need to check for cmd == NULL
+		 */
+		if (cmd->toflush) {
+			int flush = (cmd->toflush == GRACEFUL) ? 1 << 31 : 0;
+			writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
+		}
+	} else {
+		cmd->toflush = 0;
+		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
+		    !list_empty(&dmov_conf[adm].ready_commands[ch]))
 			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
 				"status %x\n", id, status);
 		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
 			"%x\n", id, status);
-		list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
 	}
+	if (!dmov_conf[adm].channel_active) {
+		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
+		schedule_delayed_work(&dmov_conf[adm].work, HZ);
+	}
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
 error:
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&dmov_conf[adm].lock);
+}
+
+static void __msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
+{
+	int adm = DMOV_ID_TO_ADM(id);
+	int ch = DMOV_ID_TO_CHAN(id);
+	unsigned long flags;
+
+	cmd->id = id;
+	cmd->toflush = 0;
+
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
+	list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
+
+	queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
+}
+
+void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
+	__msm_dmov_enqueue_cmd_ext(id, cmd);
 }
 EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);
@@ -318,8 +390,8 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
 {
 	/* Disable callback function (for backwards compatibility) */
 	cmd->exec_func = NULL;
-	msm_dmov_enqueue_cmd_ext(id, cmd);
+	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
+	__msm_dmov_enqueue_cmd_ext(id, cmd);
 }
 EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
@@ -329,14 +401,18 @@ void msm_dmov_flush(unsigned int id, int graceful)
 	int ch = DMOV_ID_TO_CHAN(id);
 	int adm = DMOV_ID_TO_ADM(id);
 	int flush = graceful ? DMOV_FLUSH_TYPE : 0;
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	struct msm_dmov_cmd *cmd;
+
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
 	/* XXX not checking if flush cmd sent already */
 	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
 		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
 		writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
 	}
+	list_for_each_entry(cmd, &dmov_conf[adm].staged_commands[ch], list)
+		cmd->toflush = graceful ? GRACEFUL : NONGRACEFUL;
 	/* spin_unlock_irqrestore has the necessary barrier */
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
 }
 EXPORT_SYMBOL(msm_dmov_flush);
@@ -372,9 +448,10 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
 	cmd.dmov_cmd.exec_func = NULL;
 	cmd.id = id;
 	cmd.result = 0;
+	INIT_WORK_ONSTACK(&cmd.dmov_cmd.work, msm_dmov_enqueue_cmd_ext_work);
 	init_completion(&cmd.complete);
 
-	msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
+	__msm_dmov_enqueue_cmd_ext(id, &cmd.dmov_cmd);
 	wait_for_completion_io(&cmd.complete);
 	if (cmd.result != 0x80000002) {
@@ -398,7 +475,7 @@ static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
 	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
 }
 
-static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
+static irqreturn_t msm_dmov_isr(int irq, void *dev_id)
 {
 	unsigned int int_status;
 	unsigned int mask;
@@ -411,11 +488,12 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
 	struct msm_dmov_cmd *cmd;
 	int adm = DMOV_IRQ_TO_ADM(irq);
 
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+	mutex_lock(&dmov_conf[adm].lock);
 	/* read and clear isr */
 	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
 	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);
 
+	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
 	while (int_status) {
 		mask = int_status & -int_status;
 		ch = fls(mask) - 1;
@@ -483,51 +561,37 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
 				ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
 						adm));
 				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
-				if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) &&
-				    !list_empty(&dmov_conf[adm].ready_commands[ch])) {
-					cmd = list_entry(dmov_conf[adm].
-						ready_commands[ch].next, typeof(*cmd),
-						list);
-					list_del(&cmd->list);
-					if (cmd->exec_func)
-						cmd->exec_func(cmd);
-					list_add_tail(&cmd->list,
-						&dmov_conf[adm].active_commands[ch]);
-					PRINT_FLOW("msm_datamover_irq_handler id %d,"
-						"start command\n", id);
-					writel_relaxed(cmd->cmdptr,
-						DMOV_REG(DMOV_CMD_PTR(ch), adm));
-				}
+				if (ch_status & DMOV_STATUS_CMD_PTR_RDY)
+					start_ready_cmd(ch, adm);
 			} while (ch_status & DMOV_STATUS_RSLT_VALID);
 			if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
-			    list_empty(&dmov_conf[adm].ready_commands[ch]))
+				list_empty(&dmov_conf[adm].ready_commands[ch]))
 				dmov_conf[adm].channel_active &= ~(1U << ch);
 			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
 		}
 	}
+	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
 
 	if (!dmov_conf[adm].channel_active && valid) {
 		disable_irq_nosync(dmov_conf[adm].irq);
 		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
-		mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
+		schedule_delayed_work(&dmov_conf[adm].work, HZ);
 	}
 
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&dmov_conf[adm].lock);
 	return valid ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int msm_dmov_suspend_late(struct device *dev)
 {
-	unsigned long irq_flags;
 	struct platform_device *pdev = to_platform_device(dev);
 	int adm = (pdev->id >= 0) ? pdev->id : 0;
-	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
+
+	mutex_lock(&dmov_conf[adm].lock);
 	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
 		BUG_ON(dmov_conf[adm].channel_active);
-		del_timer(&dmov_conf[adm].timer);
 		msm_dmov_clk_toggle(adm, 0);
 		dmov_conf[adm].clk_ctl = CLK_DIS;
 	}
-	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
+	mutex_unlock(&dmov_conf[adm].lock);
 	return 0;
 }
@@ -642,12 +706,19 @@ static int msm_dmov_probe(struct platform_device *pdev)
 	if (!dmov_conf[adm].base)
 		return -ENOMEM;
 
-	ret = request_irq(dmov_conf[adm].irq, msm_datamover_irq_handler,
-		0, "msmdatamover", NULL);
+	dmov_conf[adm].cmd_wq = alloc_ordered_workqueue("dmov%d_wq", 0, adm);
+	if (!dmov_conf[adm].cmd_wq) {
+		PRINT_ERROR("Couldn't allocate ADM%d workqueue.\n", adm);
+		ret = -ENOMEM;
+		goto out_map;
+	}
+
+	ret = request_threaded_irq(dmov_conf[adm].irq, NULL, msm_dmov_isr,
+		IRQF_ONESHOT, "msmdatamover", NULL);
 	if (ret) {
 		PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
 			dmov_conf[adm].irq);
-		goto out_map;
+		goto out_wq;
 	}
 	disable_irq(dmov_conf[adm].irq);
 	ret = msm_dmov_init_clocks(pdev);
@@ -663,6 +734,7 @@ static int msm_dmov_probe(struct platform_device *pdev)
 	config_datamover(adm);
 	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
+		INIT_LIST_HEAD(&dmov_conf[adm].staged_commands[i]);
 		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
 		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);
@@ -675,6 +747,8 @@ static int msm_dmov_probe(struct platform_device *pdev)
 	return ret;
 out_irq:
 	free_irq(dmov_conf[adm].irq, NULL);
+out_wq:
+	destroy_workqueue(dmov_conf[adm].cmd_wq);
 out_map:
 	iounmap(dmov_conf[adm].base);
 	return ret;


@@ -35,7 +35,10 @@ struct msm_dmov_cmd {
 			unsigned int result,
 			struct msm_dmov_errdata *err);
 	void (*exec_func)(struct msm_dmov_cmd *cmd);
+	struct work_struct work;
 	unsigned id; /* For internal use */
 	void *user; /* Pointer for caller's reference */
+	u8 toflush;
 };
 
 struct msm_dmov_pdata {