Merge branches 'dma40', 'pl08x', 'fsldma', 'imx' and 'intel-mid' into dmaengine

commit 6391987d6f: 19 changed files with 5001 additions and 437 deletions.
arch/arm/plat-mxc/include/mach/dma-v1.h:

@@ -27,6 +27,8 @@
#define imx_has_dma_v1()	(cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())

#include <mach/dma.h>

#define IMX_DMA_CHANNELS 16

#define DMA_MODE_READ		0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);

void imx_dma_free(int channel);

enum imx_dma_prio {
	DMA_PRIO_HIGH = 0,
	DMA_PRIO_MEDIUM = 1,
	DMA_PRIO_LOW = 2
};

int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);

#endif /* __MACH_DMA_V1_H__ */
arch/arm/plat-mxc/include/mach/dma.h (new file, 67 lines):

@@ -0,0 +1,67 @@
/*
 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARCH_MXC_DMA_H__
#define __ASM_ARCH_MXC_DMA_H__

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/dmaengine.h>

/*
 * This enumerates peripheral types. Used for SDMA.
 */
enum sdma_peripheral_type {
	IMX_DMATYPE_SSI,	/* MCU domain SSI */
	IMX_DMATYPE_SSI_SP,	/* Shared SSI */
	IMX_DMATYPE_MMC,	/* MMC */
	IMX_DMATYPE_SDHC,	/* SDHC */
	IMX_DMATYPE_UART,	/* MCU domain UART */
	IMX_DMATYPE_UART_SP,	/* Shared UART */
	IMX_DMATYPE_FIRI,	/* FIRI */
	IMX_DMATYPE_CSPI,	/* MCU domain CSPI */
	IMX_DMATYPE_CSPI_SP,	/* Shared CSPI */
	IMX_DMATYPE_SIM,	/* SIM */
	IMX_DMATYPE_ATA,	/* ATA */
	IMX_DMATYPE_CCM,	/* CCM */
	IMX_DMATYPE_EXT,	/* External peripheral */
	IMX_DMATYPE_MSHC,	/* Memory Stick Host Controller */
	IMX_DMATYPE_MSHC_SP,	/* Shared Memory Stick Host Controller */
	IMX_DMATYPE_DSP,	/* DSP */
	IMX_DMATYPE_MEMORY,	/* Memory */
	IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
	IMX_DMATYPE_SPDIF,	/* SPDIF */
	IMX_DMATYPE_IPU_MEMORY,	/* IPU Memory */
	IMX_DMATYPE_ASRC,	/* ASRC */
	IMX_DMATYPE_ESAI,	/* ESAI */
};

enum imx_dma_prio {
	DMA_PRIO_HIGH = 0,
	DMA_PRIO_MEDIUM = 1,
	DMA_PRIO_LOW = 2
};

struct imx_dma_data {
	int dma_request; /* DMA request line */
	enum sdma_peripheral_type peripheral_type;
	int priority;
};

static inline int imx_dma_is_ipu(struct dma_chan *chan)
{
	return !strcmp(dev_name(chan->device->dev), "ipu-core");
}

static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
{
	return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
		!strcmp(dev_name(chan->device->dev), "imx-dma");
}

#endif
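The strcmp() helpers above exist so a dmaengine filter function can tell the general-purpose SDMA/DMA channels apart from IPU channels. As a rough sketch (not part of this commit), a client might pair them with dma_request_channel() as below; the request line, peripheral type, and priority values are illustrative placeholders, and request_imx_chan is a made-up name:

```c
#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool imx_filter(struct dma_chan *chan, void *param)
{
	if (!imx_dma_is_general_purpose(chan))
		return false;
	chan->private = param;	/* picked up in device_alloc_chan_resources */
	return true;
}

static struct dma_chan *request_imx_chan(void)
{
	static struct imx_dma_data data = {
		.dma_request	 = 6,			/* placeholder request line */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority	 = DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, imx_filter, &data);
}
```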
arch/arm/plat-mxc/include/mach/sdma.h (new file, 17 lines):

@@ -0,0 +1,17 @@
#ifndef __MACH_MXC_SDMA_H__
#define __MACH_MXC_SDMA_H__

/**
 * struct sdma_platform_data - platform specific data for SDMA engine
 *
 * @sdma_version	The version of this SDMA engine
 * @cpu_name		used to generate the firmware name
 * @to_version		CPU Tape out version
 */
struct sdma_platform_data {
	int sdma_version;
	char *cpu_name;
	int to_version;
};

#endif /* __MACH_MXC_SDMA_H__ */
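A machine port hands this structure to the SDMA platform device so the driver can pick the right firmware. A hedged sketch of such a registration follows; the SoC name, version numbers, and device layout are placeholders, not taken from this diff:

```c
#include <linux/platform_device.h>
#include <mach/sdma.h>

/* Illustrative values only: version/tape-out depend on the actual SoC */
static struct sdma_platform_data imx35_sdma_pdata = {
	.sdma_version	= 2,
	.cpu_name	= "imx35",
	.to_version	= 1,
};

/* registered by board/SoC init code; "imx-sdma" matches the new driver */
static struct platform_device imx_sdma_device = {
	.name	= "imx-sdma",
	.dev	= {
		.platform_data = &imx35_sdma_pdata,
	},
};
```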
arch/powerpc/include/asm/fsldma.h (deleted, 137 lines):

@@ -1,137 +0,0 @@
/*
 * Freescale MPC83XX / MPC85XX DMA Controller
 *
 * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
#define __ARCH_POWERPC_ASM_FSLDMA_H__

#include <linux/slab.h>
#include <linux/dmaengine.h>

/*
 * Definitions for the Freescale DMA controller's DMA_SLAVE implementation
 *
 * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
 * transfers. An example usage would be an accelerated copy between two
 * scatterlists. Another example use would be an accelerated copy from
 * multiple non-contiguous device buffers into a single scatterlist.
 *
 * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
 * structure contains a list of hardware addresses that should be copied
 * to/from the scatterlist passed into device_prep_slave_sg(). The structure
 * also has some fields to enable hardware-specific features.
 */

/**
 * struct fsl_dma_hw_addr
 * @entry: linked list entry
 * @address: the hardware address
 * @length: length to transfer
 *
 * Holds a single physical hardware address / length pair for use
 * with the DMAEngine DMA_SLAVE API.
 */
struct fsl_dma_hw_addr {
	struct list_head entry;

	dma_addr_t address;
	size_t length;
};

/**
 * struct fsl_dma_slave
 * @addresses: a linked list of struct fsl_dma_hw_addr structures
 * @request_count: value for DMA request count
 * @src_loop_size: setup and enable constant source-address DMA transfers
 * @dst_loop_size: setup and enable constant destination address DMA transfers
 * @external_start: enable externally started DMA transfers
 * @external_pause: enable externally paused DMA transfers
 *
 * Holds a list of address / length pairs for use with the DMAEngine
 * DMA_SLAVE API implementation for the Freescale DMA controller.
 */
struct fsl_dma_slave {

	/* List of hardware address/length pairs */
	struct list_head addresses;

	/* Support for extra controller features */
	unsigned int request_count;
	unsigned int src_loop_size;
	unsigned int dst_loop_size;
	bool external_start;
	bool external_pause;
};

/**
 * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
 * @slave: the &struct fsl_dma_slave to add to
 * @address: the hardware address to add
 * @length: the length of bytes to transfer from @address
 *
 * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
 * success, -ERRNO otherwise.
 */
static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
				       dma_addr_t address, size_t length)
{
	struct fsl_dma_hw_addr *addr;

	addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
	if (!addr)
		return -ENOMEM;

	INIT_LIST_HEAD(&addr->entry);
	addr->address = address;
	addr->length = length;

	list_add_tail(&addr->entry, &slave->addresses);
	return 0;
}

/**
 * fsl_dma_slave_free - free a struct fsl_dma_slave
 * @slave: the struct fsl_dma_slave to free
 *
 * Free a struct fsl_dma_slave and all associated address/length pairs
 */
static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
{
	struct fsl_dma_hw_addr *addr, *tmp;

	if (slave) {
		list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
			list_del(&addr->entry);
			kfree(addr);
		}

		kfree(slave);
	}
}

/**
 * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
 * @gfp: the flags to pass to kmalloc when allocating this structure
 *
 * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
 * struct fsl_dma_slave on success, or NULL on failure.
 */
static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
{
	struct fsl_dma_slave *slave;

	slave = kzalloc(sizeof(*slave), gfp);
	if (!slave)
		return NULL;

	INIT_LIST_HEAD(&slave->addresses);
	return slave;
}

#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
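For reference, a client of this now-removed API built a list of hardware address/length pairs, hung it on the channel, and only then called device_prep_slave_sg(). A hedged sketch of the old usage (the address and direction are placeholders, old_style_prep is a made-up name) shows why the generic DMA_SG/DMA_SLAVE_CONFIG paths added elsewhere in this merge could replace it:

```c
/* Sketch of the old (deleted) fsl_dma_slave usage; 0x1000 is illustrative */
static struct dma_async_tx_descriptor *old_style_prep(struct dma_chan *chan,
		struct scatterlist *sgl, unsigned int sg_len)
{
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	/* one hardware address/length pair */
	if (fsl_dma_slave_append(slave, 0x1000, PAGE_SIZE)) {
		fsl_dma_slave_free(slave);
		return NULL;
	}

	chan->private = slave;	/* consumed by fsl_dma_prep_slave_sg() */
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE, 0);
}
```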
drivers/dma/Kconfig:

@@ -49,6 +49,14 @@ config INTEL_MID_DMAC
config ASYNC_TX_DISABLE_CHANNEL_SWITCH
	bool

config AMBA_PL08X
	bool "ARM PrimeCell PL080 or PL081 support"
	depends on ARM_AMBA && EXPERIMENTAL
	select DMA_ENGINE
	help
	  Platform has a PL08x DMAC device
	  which can provide DMA engine support

config INTEL_IOATDMA
	tristate "Intel I/OAT DMA support"
	depends on PCI && X86
@@ -195,6 +203,22 @@ config PCH_DMA
	help
	  Enable support for the Topcliff PCH DMA engine.

config IMX_SDMA
	tristate "i.MX SDMA support"
	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
	select DMA_ENGINE
	help
	  Support the i.MX SDMA engine. This engine is integrated into
	  Freescale i.MX25/31/35/51 chips.

config IMX_DMA
	tristate "i.MX DMA support"
	depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
	select DMA_ENGINE
	help
	  Support the i.MX DMA engine. This engine is integrated into
	  Freescale i.MX1/21/27 chips.

config DMA_ENGINE
	bool
drivers/dma/Makefile:

@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
drivers/dma/amba-pl08x.c (new file, 2167 lines): diff omitted because the file is too large to display.
drivers/dma/dmaengine.c:

@@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device)
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
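The new BUG_ON() pairs make the contract explicit: a driver that sets DMA_SG, DMA_SLAVE, or DMA_CYCLIC in its capability mask must supply the matching prep (and device_control) callbacks before registering. A minimal hedged sketch of a conforming registration; my_prep_sg, my_prep_slave_sg, and my_control are hypothetical driver callbacks, not names from this diff:

```c
#include <linux/dmaengine.h>

/* hypothetical callbacks a driver would implement */
extern struct dma_async_tx_descriptor *my_prep_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);
extern struct dma_async_tx_descriptor *my_prep_slave_sg(struct dma_chan *chan,
		struct scatterlist *sgl, unsigned int sg_len,
		enum dma_data_direction direction, unsigned long flags);
extern int my_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg);

static int my_register(struct dma_device *dev)
{
	dma_cap_set(DMA_SG, dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dev->cap_mask);

	/* every capability set above must have its callback, or the new
	 * BUG_ON()s fire during dma_async_device_register() */
	dev->device_prep_dma_sg = my_prep_sg;
	dev->device_prep_slave_sg = my_prep_slave_sg;
	dev->device_control = my_control;	/* required with DMA_SLAVE */

	return dma_async_device_register(dev);
}
```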
drivers/dma/fsldma.c:

@@ -35,9 +35,10 @@
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static const char msg_ld_oom[] = "No free memory for link descriptor\n";

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
@@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, "No free memory for link descriptor\n");
		dev_err(chan->dev, msg_ld_oom);
		return NULL;
	}
@@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
					"No free memory for link descriptor\n");
			dev_err(chan->dev, msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
@@ -583,6 +583,125 @@ fail:
	return NULL;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
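With DMA_SG set, a client can hand the controller two already-mapped scatterlists of different layout and let the driver split the copy at FSL_DMA_BCR_MAX_CNT boundaries. A hedged usage sketch (assumes both lists were mapped with dma_map_sg(); sg_copy is a made-up name):

```c
#include <linux/dmaengine.h>

static int sg_copy(struct dma_chan *chan,
		   struct scatterlist *dst, unsigned int dst_nents,
		   struct scatterlist *src, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_sg(chan, dst, dst_nents,
					      src, src_nents,
					      DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);	/* queue the descriptor chain */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* kick the hardware */
	return 0;
}
```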
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
@@ -599,207 +718,70 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!dchan)
		return NULL;

	if (!dchan->private)
		return NULL;

	chan = to_fsl_chan(dchan);
	slave = dchan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
			new = fsl_dma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "No free memory for "
						   "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
					     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(chan, &new->hw, copy);
			set_desc_src(chan, &new->hw, dma_src);
			set_desc_dst(chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	/* Enable extra controller features */
	if (chan->set_src_loop_size)
		chan->set_src_loop_size(chan, slave->src_loop_size);

	if (chan->set_dst_loop_size)
		chan->set_dst_loop_size(chan, slave->dst_loop_size);

	if (chan->toggle_ext_start)
		chan->toggle_ext_start(chan, slave->external_start);

	if (chan->toggle_ext_pause)
		chan->toggle_ext_pause(chan, slave->external_pause);

	if (chan->set_request_count)
		chan->set_request_count(chan, slave->request_count);

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* Halt the DMA engine */
	dma_halt(chan);
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->desc_lock, flags);
	spin_lock_irqsave(&chan->desc_lock, flags);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);

		spin_unlock_irqrestore(&chan->desc_lock, flags);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
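On the client side, the new DMA_SLAVE_CONFIG case means the external-request burst size can now be programmed through the generic API instead of the old fsl_dma_slave fields. A hedged sketch; the width and maxburst values are placeholders, and fsl_set_burst is a made-up name:

```c
#include <linux/dmaengine.h>

static int fsl_set_burst(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* placeholder */
		.dst_maxburst	= 8,				/* placeholder */
	};

	/* the driver computes size = width * maxburst, as shown above */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}
```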
@@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op,

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
drivers/dma/imx-dma.c (new file, 422 lines):

@@ -0,0 +1,422 @@
/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

struct imxdma_channel {
	struct imxdma_engine		*imxdma;
	unsigned int			channel;
	unsigned int			imxdma_channel;

	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	dma_cookie_t			last_completed;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device			*dev;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static void imxdma_handle(struct imxdma_channel *imxdmac)
{
	if (imxdmac->desc.callback)
		imxdmac->desc.callback(imxdmac->desc.callback_param);
	imxdmac->last_completed = imxdmac->desc.cookie;
}

static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_SUCCESS;
	imxdma_handle(imxdmac);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_ERROR;
	imxdma_handle(imxdmac);
}

static void imxdma_progression(int channel, void *data,
		struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_SUCCESS;
	imxdma_handle(imxdmac);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdmac->status = DMA_ERROR;
		imx_dma_disable(imxdmac->imxdma_channel);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	dma_cookie_t last_used;
	enum dma_status ret;

	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);

	return ret;
}

static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
	dma_cookie_t cookie = imxdma->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	imxdma->chan.cookie = cookie;
	imxdma->desc.cookie = cookie;

	return cookie;
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_irq(&imxdmac->lock);

	cookie = imxdma_assign_cookie(imxdmac);

	imx_dma_enable(imxdmac->imxdma_channel);

	spin_unlock_irq(&imxdmac->lock);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	imxdmac->dma_request = data->dma_request;

	dma_async_tx_descriptor_init(&imxdmac->desc, chan);
	imxdmac->desc.tx_submit = imxdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	imxdmac->desc.flags = DMA_CTRL_ACK;

	imxdmac->status = DMA_SUCCESS;

	return 0;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	imx_dma_disable(imxdmac->imxdma_channel);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, ret, dma_length = 0;
	unsigned int dmamode;

	if (imxdmac->status == DMA_IN_PROGRESS)
		return NULL;

	imxdmac->status = DMA_IN_PROGRESS;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	if (direction == DMA_FROM_DEVICE)
		dmamode = DMA_MODE_READ;
	else
		dmamode = DMA_MODE_WRITE;

	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
		 dma_length, imxdmac->per_address, dmamode);
	if (ret)
		return NULL;

	return &imxdmac->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int i, ret;
	unsigned int periods = buf_len / period_len;
	unsigned int dmamode;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (imxdmac->status == DMA_IN_PROGRESS)
		return NULL;
	imxdmac->status = DMA_IN_PROGRESS;

	ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
			imxdma_progression);
	if (ret) {
		dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
		return NULL;
	}

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	if (direction == DMA_FROM_DEVICE)
		dmamode = DMA_MODE_READ;
	else
		dmamode = DMA_MODE_WRITE;

	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
			IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
	if (ret)
		return NULL;

	return &imxdmac->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	/*
	 * Nothing to do. We only have a single descriptor
	 */
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
				DMA_PRIO_MEDIUM);
		if (imxdmac->channel < 0)
			goto err_init;

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
		       imxdma_irq_handler, imxdma_err_handler, imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
		dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);

		imxdmac->chan.device = &imxdma->dma_device;
		imxdmac->chan.chan_id = i;
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (i-- >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");
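The cyclic path is what an audio (PCM) client would use: after a DMA_SLAVE_CONFIG, it asks for a ring of period-sized segments over one DMA buffer, and the driver's progression handler fires per period. A hedged client-side sketch; the FIFO width, burst, and buffer/period sizes are placeholders, and start_cyclic is a made-up name:

```c
#include <linux/dmaengine.h>

static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* placeholder */
		.dst_maxburst	= 4,				/* placeholder */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					   (unsigned long)&cfg);
	if (ret)
		return ret;

	/* 64 KiB buffer split into 4 KiB periods; illustrative numbers */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, 0x10000,
						    0x1000, DMA_TO_DEVICE);
	if (!desc)
		return -EINVAL;

	desc->tx_submit(desc);	/* imx-dma enables the channel here */
	return 0;
}
```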
drivers/dma/imx-sdma.c (new file, 1392 lines): diff omitted because the file is too large to display.
drivers/dma/intel_mid_dma.c:

@@ -25,6 +25,7 @@
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>

#define MAX_CHAN	4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case LNW_DMA_WIDTH_8BIT:
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case LNW_DMA_WIDTH_16BIT:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case LNW_DMA_WIDTH_32BIT:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel is idle */
	if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/*error*/
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	midc->busy = true;
	/*write registers and en*/
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	first->status = DMA_IN_PROGRESS;
}

/**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli	*llitem;
	void *param_txd = NULL;

	midc->completed = txd->cookie;
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	list_move(&desc->desc_node, &midc->free_list);

	if (desc->lli != NULL) {
		/*clear the DONE bit of completed LLI in memory*/
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
		spin_lock_bh(&midc->lock);
		return;
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,

	/*tx is complete*/
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS) {
			desc->status = DMA_SUCCESS;
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
		}
	}
	return;
	}
}
/**
 * midc_lli_fill_sg - Helper function to convert
 * SG list to Linked List Items.
 *@midc: Channel
 *@desc: DMA descriptor
 *@sglist: Pointer to SG list
 *@sglen: SG list length
 *@flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist  *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/*Populate CTL_LOW and LLI values*/
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
		/*Check for circular list, otherwise terminate LLI to ZERO*/
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/*Populate CTL_HI values*/
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/*Populate SAR and DAR values*/
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn == DMA_TO_DEVICE) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_FROM_DEVICE) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/*Copy values into block descriptor in system memory*/
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/*Copy very first LLI values to descriptor*/
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
/*****************************************************************************
DMA engine callback Functions*/
/**
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
	desc->txd.cookie = cookie;


	if (list_empty(&midc->active_list)) {
		midc_dostart(midc, desc);
	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	} else {
	else
		list_add_tail(&desc->desc_node, &midc->queue);
	}

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config	*slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;
	LIST_HEAD(list);
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->in_use == false) {
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	list_splice_init(&midc->free_list, &list);
	midc->descs_allocated = 0;
	midc->slave = NULL;

	/*Suspend and disable the channel*/
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		pr_debug("MDMA: freeing descriptor %p\n", desc);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}

/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Do DMA sg txn: NOT supported now
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	/*not supported now*/
	return NULL;
}

/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
|
|||
union intel_mid_dma_ctl_hi ctl_hi;
|
||||
union intel_mid_dma_cfg_lo cfg_lo;
|
||||
union intel_mid_dma_cfg_hi cfg_hi;
|
||||
enum intel_mid_dma_width width = 0;
|
||||
enum dma_slave_buswidth width;
|
||||
|
||||
pr_debug("MDMA: Prep for memcpy\n");
|
||||
WARN_ON(!chan);
|
||||
BUG_ON(!chan);
|
||||
if (!len)
|
||||
return NULL;
|
||||
|
||||
mids = chan->private;
|
||||
WARN_ON(!mids);
|
||||
|
||||
midc = to_intel_mid_dma_chan(chan);
|
||||
WARN_ON(!midc);
|
||||
BUG_ON(!midc);
|
||||
|
||||
mids = midc->mid_slave;
|
||||
BUG_ON(!mids);
|
||||
|
||||
pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
|
||||
midc->dma->pci_id, midc->ch_id, len);
|
||||
pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
|
||||
mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
|
||||
mids->cfg_mode, mids->dma_slave.direction,
|
||||
mids->hs_mode, mids->dma_slave.src_addr_width);
|
||||
|
||||
/*calculate CFG_LO*/
|
||||
if (mids->hs_mode == LNW_DMA_SW_HS) {
|
||||
|
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
	if (midc->dma->pimr_mask) {
		cfg_hi.cfgx.protctl = 0x0;	/*default value*/
		cfg_hi.cfgx.fifo_mode = 1;
		if (mids->dirn == DMA_TO_DEVICE) {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			cfg_hi.cfgx.src_per = 0;
			if (mids->device_instance == 0)
				cfg_hi.cfgx.dst_per = 3;
			if (mids->device_instance == 1)
				cfg_hi.cfgx.dst_per = 1;
		} else if (mids->dirn == DMA_FROM_DEVICE) {
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			if (mids->device_instance == 0)
				cfg_hi.cfgx.src_per = 2;
			if (mids->device_instance == 1)
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	width = mids->src_width;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
@@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_tr_width = mids->dst_width;
	ctl_lo.ctlx.src_tr_width = mids->src_width;
	ctl_lo.ctlx.dst_msize = mids->src_msize;
	ctl_lo.ctlx.src_msize = mids->dst_msize;
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dirn == DMA_TO_DEVICE) {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dirn == DMA_FROM_DEVICE) {
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
@@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dirn;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
@@ -605,6 +712,85 @@ err_desc_get:
	midc_desc_put(midc, desc);
	return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Prepares LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		pr_debug("MDMA: SG list is not supported by this controller\n");
		return NULL;
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}
	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors*/
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}
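As the function shows, the LLI-based slave path piggybacks on prep_memcpy and then rewrites the descriptor for linked-list mode. A hedged client-side sketch follows; mid_start_rx is a made-up name, and the only assumption beyond this diff is that the caller already has a populated intel_mid_dma_slave (its embedded dma_slave member is the struct the driver recovers with to_intel_mid_dma_slave() in dma_slave_control()):

```c
#include <linux/dmaengine.h>
#include <linux/intel_mid_dma.h>

static int mid_start_rx(struct dma_chan *chan,
			struct intel_mid_dma_slave *mids,
			struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* hand the embedded dma_slave_config to the driver */
	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					   (unsigned long)&mids->dma_slave);
	if (ret)
		return ret;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->tx_submit(desc);	/* queue; tasklet completes it later */
	return 0;
}
```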
/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
@@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;

	if (true == midc->in_use) {
	if (true == midc->busy) {
		/*trying to free ch in use!!!!!*/
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}

	pm_runtime_put(&mid->pdev->dev);
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
	dma_addr_t		phys;
	int	i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(mid->pdev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	midc->completed = chan->cookie = 1;
@@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
	desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
	if (!desc) {
		pr_err("ERR_MDMA: desc failed\n");
		pm_runtime_put(&mid->pdev->dev);
		return -ENOMEM;
		/*check*/
	}
@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
|
|||
list_add_tail(&desc->desc_node, &midc->free_list);
|
||||
}
|
||||
spin_unlock_bh(&midc->lock);
|
||||
midc->in_use = false;
|
||||
midc->in_use = true;
|
||||
midc->busy = false;
|
||||
pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
|
||||
return i;
|
||||
}
|
||||
|
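For context, a dmaengine client reaches these callbacks indirectly. A minimal request/release cycle against the generic API of this kernel generation (the filter and its decision logic are made up) looks like:

/* Sketch: how a client ends up in the alloc/free callbacks above. */
static bool my_filter(struct dma_chan *chan, void *param)
{
    /* match on whatever identifies the wanted controller/channel */
    return true;    /* placeholder: accept the first offered channel */
}

static int example_use_channel(void)
{
    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* ends up in intel_mid_dma_alloc_chan_resources() */
    chan = dma_request_channel(mask, my_filter, NULL);
    if (!chan)
        return -ENODEV;

    /* ... prepare and submit transfers ... */

    /* ends up in intel_mid_dma_free_chan_resources() */
    dma_release_channel(chan);
    return 0;
}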
@@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
{
    struct middma_device *mid = NULL;
    struct intel_mid_dma_chan *midc = NULL;
    u32 status;
    u32 status, raw_tfr, raw_block;
    int i;

    mid = (struct middma_device *)data;

@@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data)
        return;
    }
    pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
    status = ioread32(mid->dma_base + RAW_TFR);
    pr_debug("MDMA:RAW_TFR %x\n", status);
    raw_tfr = ioread32(mid->dma_base + RAW_TFR);
    raw_block = ioread32(mid->dma_base + RAW_BLOCK);
    status = raw_tfr | raw_block;
    status &= mid->intr_mask;
    while (status) {
        /*txn interrupt*/

@@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data)
        }
        pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
                status, midc->ch_id, i);
        midc->raw_tfr = raw_tfr;
        midc->raw_block = raw_block;
        spin_lock_bh(&midc->lock);
        /*clearing this interrupts first*/
        iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
        iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);

        spin_lock_bh(&midc->lock);
        if (raw_block) {
            iowrite32((1 << midc->ch_id),
                mid->dma_base + CLEAR_BLOCK);
        }
        midc_scan_descriptors(mid, midc);
        pr_debug("MDMA:Scan of desc... complete, unmasking\n");
        iowrite32(UNMASK_INTR_REG(midc->ch_id),
                mid->dma_base + MASK_TFR);
        if (raw_block) {
            iowrite32(UNMASK_INTR_REG(midc->ch_id),
                    mid->dma_base + MASK_BLOCK);
        }
        spin_unlock_bh(&midc->lock);
    }
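The split above follows the usual top/bottom-half pattern: the hard IRQ handler only masks the channel's interrupts and schedules the tasklet; the tasklet clears the raw status, scans descriptors, and unmasks again. Condensed to its skeleton (a sketch with hypothetical names, not the driver's exact code):

struct example_dev {
    void __iomem *base;
    u32 ch_id;
    struct tasklet_struct tasklet;
};

/* Hard IRQ: decide if the (possibly shared) line is ours, mask, defer. */
static irqreturn_t example_irq(int irq, void *data)
{
    struct example_dev *dev = data;

    if (!ioread32(dev->base + RAW_TFR))
        return IRQ_NONE;        /* not ours on a shared line */
    iowrite32(MASK_INTR_REG(dev->ch_id), dev->base + MASK_TFR);
    tasklet_schedule(&dev->tasklet);
    return IRQ_HANDLED;
}

/* Tasklet: clear raw status, complete descriptors, re-enable. */
static void example_tasklet(unsigned long data)
{
    struct example_dev *dev = (struct example_dev *)data;

    iowrite32(1 << dev->ch_id, dev->base + CLEAR_TFR);
    /* ... complete finished descriptors, start queued ones ... */
    iowrite32(UNMASK_INTR_REG(dev->ch_id), dev->base + MASK_TFR);
}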
@@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
    struct middma_device *mid = data;
    u32 status;
    u32 tfr_status, err_status;
    int call_tasklet = 0;

    tfr_status = ioread32(mid->dma_base + RAW_TFR);
    err_status = ioread32(mid->dma_base + RAW_ERR);
    if (!tfr_status && !err_status)
        return IRQ_NONE;

    /*DMA Interrupt*/
    pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
    if (!mid) {

@@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
        return -EINVAL;
    }

    status = ioread32(mid->dma_base + RAW_TFR);
    pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
    status &= mid->intr_mask;
    if (status) {
    pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
    tfr_status &= mid->intr_mask;
    if (tfr_status) {
        /*need to disable intr*/
        iowrite32((status << 8), mid->dma_base + MASK_TFR);
        pr_debug("MDMA: Calling tasklet %x\n", status);
        iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
        iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
        pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
        call_tasklet = 1;
    }
    status = ioread32(mid->dma_base + RAW_ERR);
    status &= mid->intr_mask;
    if (status) {
        iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
    err_status &= mid->intr_mask;
    if (err_status) {
        iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
        call_tasklet = 1;
    }
    if (call_tasklet)
@@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
{
    struct middma_device *dma = pci_get_drvdata(pdev);
    int err, i;
    unsigned int irq_level;

    /* DMA coherent memory pool for DMA descriptor allocations */
    dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,

@@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
    pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
    /*init CH structures*/
    dma->intr_mask = 0;
    dma->state = RUNNING;
    for (i = 0; i < dma->max_chan; i++) {
        struct intel_mid_dma_chan *midch = &dma->ch[i];

@@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)

    /*register irq */
    if (dma->pimr_mask) {
        irq_level = IRQF_SHARED;
        pr_debug("MDMA:Requesting irq shared for DMAC1\n");
        err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
            IRQF_SHARED, "INTEL_MID_DMAC1", dma);

@@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
            goto err_irq;
    } else {
        dma->intr_mask = 0x03;
        irq_level = 0;
        pr_debug("MDMA:Requesting irq for DMAC2\n");
        err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
            0, "INTEL_MID_DMAC2", dma);
            IRQF_SHARED, "INTEL_MID_DMAC2", dma);
        if (0 != err)
            goto err_irq;
    }
@@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
    if (err)
        goto err_dma;

    pm_runtime_set_active(&pdev->dev);
    pm_runtime_enable(&pdev->dev);
    pm_runtime_allow(&pdev->dev);
    return 0;

err_dma:
@@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
    pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 *
 * @pci: PCI device structure
 * @state: PM message
 *
 * This function is called by OS when a power event occurs
 */
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
    int i;
    struct middma_device *device = pci_get_drvdata(pci);
    pr_debug("MDMA: dma_suspend called\n");

    for (i = 0; i < device->max_chan; i++) {
        if (device->ch[i].in_use)
            return -EAGAIN;
    }
    device->state = SUSPENDED;
    pci_set_drvdata(pci, device);
    pci_save_state(pci);
    pci_disable_device(pci);
    pci_set_power_state(pci, PCI_D3hot);
    return 0;
}

/**
 * dma_resume - PCI resume function
 *
 * @pci: PCI device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct pci_dev *pci)
{
    int ret;
    struct middma_device *device = pci_get_drvdata(pci);

    pr_debug("MDMA: dma_resume called\n");
    pci_set_power_state(pci, PCI_D0);
    pci_restore_state(pci);
    ret = pci_enable_device(pci);
    if (ret) {
        pr_err("MDMA: device cant be enabled for %x\n", pci->device);
        return ret;
    }
    device->state = RUNNING;
    iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
    pci_set_drvdata(pci, device);
    return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
    struct pci_dev *pci_dev = to_pci_dev(dev);
    return dma_suspend(pci_dev, PMSG_SUSPEND);
}

static int dma_runtime_resume(struct device *dev)
{
    struct pci_dev *pci_dev = to_pci_dev(dev);
    return dma_resume(pci_dev);
}

static int dma_runtime_idle(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct middma_device *device = pci_get_drvdata(pdev);
    int i;

    for (i = 0; i < device->max_chan; i++) {
        if (device->ch[i].in_use)
            return -EAGAIN;
    }

    return pm_schedule_suspend(dev, 0);
}
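These hooks rely on the usage count kept by the runtime PM core: channel allocation takes a reference, channel free drops it, and only at zero does dma_runtime_idle() get a chance to schedule a suspend. A condensed sketch of the pairing (hypothetical wrapper names, not driver functions):

/* Sketch of the runtime PM reference pairing used by this driver. */
static int example_channel_get(struct middma_device *mid)
{
    /* resumes the device if needed and increments the usage count;
     * this is what intel_mid_dma_alloc_chan_resources() does */
    return pm_runtime_get_sync(&mid->pdev->dev);
}

static void example_channel_put(struct middma_device *mid)
{
    /* decrements the usage count; at zero the core calls
     * dma_runtime_idle(), which pm_schedule_suspend()s only
     * when no channel is still marked in_use */
    pm_runtime_put(&mid->pdev->dev);
}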
/******************************************************************************
 * PCI stuff
 */
@@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
    .runtime_suspend = dma_runtime_suspend,
    .runtime_resume = dma_runtime_resume,
    .runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci = {
    .name       = "Intel MID DMA",
    .id_table   = intel_mid_dma_ids,
    .probe      = intel_mid_dma_probe,
    .remove     = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
    .suspend    = dma_suspend,
    .resume     = dma_resume,
    .driver = {
        .pm = &intel_mid_dma_pm,
    },
#endif
};

static int __init intel_mid_dma_init(void)
@@ -29,11 +29,12 @@
#include <linux/dmapool.h>
#include <linux/pci_ids.h>

#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"

#define REG_BIT0        0x00000001
#define REG_BIT8        0x00000100

#define INT_MASK_WE     0x8
#define CLEAR_DONE      0xFFFFEFFF
#define UNMASK_INTR_REG(chan_num) \
    ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
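The second byte of these mask registers holds per-channel write-enable bits (hence the INT_MASK_WE shift of 8 used in the interrupt handler), so a single 32-bit write can update one channel's mask bit without a read-modify-write. Worked out for channel 2:

/* Worked example, channel 2:
 *   UNMASK_INTR_REG(2) = (0x00000001 << 2) | (0x00000100 << 2) = 0x00000404
 *     write-enable bit 10 set, mask-value bit 2 set   -> interrupt enabled
 *   MASK_INTR_REG(2)   =  0x00000100 << 2             = 0x00000400
 *     write-enable bit 10 set, mask-value bit 2 zero  -> interrupt masked
 */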
@@ -41,6 +42,9 @@
#define ENABLE_CHANNEL(chan_num) \
    ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))

#define DISABLE_CHANNEL(chan_num) \
    (REG_BIT8 << chan_num)

#define DESCS_PER_CHANNEL   16
/*DMA Registers*/
/*registers associated with channel programming*/

@@ -50,6 +54,7 @@
/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
#define SAR         0x00 /* Source Address Register*/
#define DAR         0x08 /* Destination Address Register*/
#define LLP         0x10 /* Linked List Pointer Register*/
#define CTL_LOW     0x18 /* Control Register*/
#define CTL_HIGH    0x1C /* Control Register*/
#define CFG_LOW     0x40 /* Configuration Register Low*/

@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
union intel_mid_dma_ctl_hi {
    struct {
        u32 block_ts:12;    /*block transfer size*/
        u32 reser:20;       /*configured by DMAC*/
        u32 done:1;         /*Done - updated by DMAC*/
        u32 reser:19;       /*configured by DMAC*/
    } ctlx;
    u32 ctl_hi;

@@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi {
    u32 cfg_hi;
};
/**
 * struct intel_mid_dma_chan - internal mid representation of a DMA channel
 * @chan: dma_chan structure representation for mid chan

@@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi {
 * @slave: dma slave structure
 * @descs_allocated: total number of descriptors allocated
 * @dma: dma device structure pointer
 * @busy: bool representing if ch is busy (active txn) or not
 * @in_use: bool representing if ch is in use or not
 * @raw_tfr: raw tfr interrupt received
 * @raw_block: raw block interrupt received
 */
struct intel_mid_dma_chan {
    struct dma_chan     chan;

@@ -178,10 +187,13 @@ struct intel_mid_dma_chan {
    struct list_head    active_list;
    struct list_head    queue;
    struct list_head    free_list;
    struct intel_mid_dma_slave  *slave;
    unsigned int        descs_allocated;
    struct middma_device    *dma;
    bool            busy;
    bool            in_use;
    u32         raw_tfr;
    u32         raw_block;
    struct intel_mid_dma_slave *mid_slave;
};

static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
    return container_of(chan, struct intel_mid_dma_chan, chan);
}

enum intel_mid_dma_state {
    RUNNING = 0,
    SUSPENDED,
};
/**
 * struct middma_device - internal representation of a DMA device
 * @pdev: PCI device

@@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
 * @max_chan: max number of chs supported (from drv_data)
 * @block_size: Block size of DMA transfer supported (from drv_data)
 * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
 * @state: dma PM device state
 */
struct middma_device {
    struct pci_dev      *pdev;

@@ -220,6 +237,7 @@ struct middma_device {
    int         max_chan;
    int         block_size;
    unsigned int        pimr_mask;
    enum intel_mid_dma_state state;
};

static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -238,14 +256,27 @@ struct intel_mid_dma_desc {
    u32         cfg_lo;
    u32         ctl_lo;
    u32         ctl_hi;
    struct pci_pool     *lli_pool;
    struct intel_mid_dma_lli    *lli;
    dma_addr_t      lli_phys;
    unsigned int        lli_length;
    unsigned int        current_lli;
    dma_addr_t      next;
    enum dma_data_direction dirn;
    enum dma_status     status;
    enum intel_mid_dma_width    width; /*width of DMA txn*/
    enum dma_slave_buswidth width; /*width of DMA txn*/
    enum intel_mid_dma_mode cfg_mode; /*mode configuration*/

};

struct intel_mid_dma_lli {
    dma_addr_t      sar;
    dma_addr_t      dar;
    dma_addr_t      llp;
    u32         ctl_lo;
    u32         ctl_hi;
} __attribute__ ((packed));

static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
    u32 en_reg = ioread32(dma + DMA_CHAN_EN);

@@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
{
    return container_of(txd, struct intel_mid_dma_desc, txd);
}

static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
    (struct dma_slave_config *slave)
{
    return container_of(slave, struct intel_mid_dma_slave, dma_slave);
}

int dma_resume(struct pci_dev *pci);

#endif /*__INTEL_MID_DMAC_REGS_H__*/
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
    u32 val = (1 << (1 + (chan->idx * 16)));
    u32 val = ~(1 << (chan->idx * 16));
    dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
    __raw_writel(val, XOR_INTR_CAUSE(chan));
}
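Assuming the XOR_INTR_CAUSE bits are acknowledged by writing 0 (so 1 bits are preserved), the change is easiest to see numerically:

/* For chan->idx = 0:
 *   old: val = (1 << (1 + 0*16)) = 0x00000002  -> writes 1 only to bit 1,
 *        zeroing every other cause bit as a side effect
 *   new: val = ~(1 << (0*16))    = 0xfffffffe  -> writes 0 only to bit 0,
 *        leaving all other cause bits untouched
 * For chan->idx = 1 the cleared bit moves to bit 16: val = 0xfffeffff.
 */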
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(

    sh_chan = to_sh_chan(chan);
    param = chan->private;
    slave_addr = param->config->addr;

    /* Someone calling slave DMA on a public channel? */
    if (!param || !sg_len) {

@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
        return NULL;
    }

    slave_addr = param->config->addr;

    /*
     * if (param != NULL), this is a successfully requested slave channel,
     * therefore param->config != NULL too.
@@ -1903,6 +1903,18 @@ err:
    return NULL;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *chan,
        struct scatterlist *dst_sg, unsigned int dst_nents,
        struct scatterlist *src_sg, unsigned int src_nents,
        unsigned long dma_flags)
{
    if (dst_nents != src_nents)
        return NULL;

    return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
}

static int d40_prep_slave_sg_log(struct d40_desc *d40d,
                 struct d40_chan *d40c,
                 struct scatterlist *sgl,

@@ -2325,6 +2337,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
    base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
    base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
    base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
    base->dma_slave.device_prep_dma_sg = d40_prep_sg;
    base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
    base->dma_slave.device_tx_status = d40_tx_status;
    base->dma_slave.device_issue_pending = d40_issue_pending;

@@ -2345,10 +2358,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,

    dma_cap_zero(base->dma_memcpy.cap_mask);
    dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
    dma_cap_set(DMA_SG, base->dma_slave.cap_mask);

    base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
    base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
    base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
    base->dma_slave.device_prep_dma_sg = d40_prep_sg;
    base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
    base->dma_memcpy.device_tx_status = d40_tx_status;
    base->dma_memcpy.device_issue_pending = d40_issue_pending;

@@ -2375,10 +2390,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
    dma_cap_zero(base->dma_both.cap_mask);
    dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
    dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
    dma_cap_set(DMA_SG, base->dma_slave.cap_mask);

    base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
    base->dma_both.device_free_chan_resources = d40_free_chan_resources;
    base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
    base->dma_slave.device_prep_dma_sg = d40_prep_sg;
    base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
    base->dma_both.device_tx_status = d40_tx_status;
    base->dma_both.device_issue_pending = d40_issue_pending;
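With DMA_SG in the capability mask, a client can hand the controller two scatterlists of equal entry count. A hedged usage sketch (generic dmaengine calls, placeholder scatterlists assumed to be already DMA-mapped):

/* Sketch: scatter-gather copy via the new DMA_SG capability. */
static dma_cookie_t example_sg_copy(struct dma_chan *chan,
                struct scatterlist *dst, unsigned int dst_nents,
                struct scatterlist *src, unsigned int src_nents)
{
    struct dma_async_tx_descriptor *tx;

    tx = chan->device->device_prep_dma_sg(chan, dst, dst_nents,
                          src, src_nents, DMA_PREP_INTERRUPT);
    if (!tx)
        return -EBUSY;  /* d40_prep_sg() also fails if dst_nents != src_nents */

    return tx->tx_submit(tx);
}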
222
include/linux/amba/pl08x.h
Normal file

@@ -0,0 +1,222 @@
/*
 * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
 *
 * Copyright (C) 2005 ARM Ltd
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * pl08x information required by platform code
 *
 * Please credit ARM.com
 * Documentation: ARM DDI 0196D
 *
 */

#ifndef AMBA_PL08X_H
#define AMBA_PL08X_H

/* We need sizes of structs from this header */
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

/**
 * struct pl08x_channel_data - data structure to pass info between
 * platform and PL08x driver regarding channel configuration
 * @bus_id: name of this device channel, not just a device name since
 * devices may have more than one channel e.g. "foo_tx"
 * @min_signal: the minimum DMA signal number to be muxed in for this
 * channel (for platforms supporting muxed signals). If you have
 * static assignments, make sure this is set to the assigned signal
 * number. PL08x has 16 possible signals, numbered 0 thru 15, so
 * when these are not enough they often get muxed (in hardware),
 * disabling simultaneous use of the same channel for two devices.
 * @max_signal: the maximum DMA signal number to be muxed in for
 * the channel. Set to the same as min_signal for
 * devices with static assignments
 * @muxval: a number usually used to poke into some mux register to
 * mux in the signal to this channel
 * @cctl_opt: default options for the channel control register
 * @addr: source/target address in physical memory for this DMA channel,
 * can be the address of a FIFO register for burst requests for example.
 * This can be left undefined if the PrimeCell API is used for configuring
 * this.
 * @circular_buffer: whether the buffer passed in is circular and
 * shall simply be looped round round (like a record baby round
 * round round round)
 * @single: the device connected to this channel will request single
 * DMA transfers, not bursts. (Bursts are default.)
 */
struct pl08x_channel_data {
    char *bus_id;
    int min_signal;
    int max_signal;
    u32 muxval;
    u32 cctl;
    u32 ccfg;
    dma_addr_t addr;
    bool circular_buffer;
    bool single;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 * @fill_bytes: bytes required to fill to the next bus memory
 * boundary
 */
struct pl08x_bus_data {
    dma_addr_t addr;
    u8 maxwidth;
    u8 buswidth;
    u32 fill_bytes;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @signal: the physical signal (aka channel) serving this
 * physical channel right now
 * @serving: the virtual channel currently being served by this
 * physical channel
 */
struct pl08x_phy_chan {
    unsigned int id;
    void __iomem *base;
    spinlock_t lock;
    int signal;
    struct pl08x_dma_chan *serving;
    u32 csrc;
    u32 cdst;
    u32 clli;
    u32 cctl;
    u32 ccfg;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 */
struct pl08x_txd {
    struct dma_async_tx_descriptor tx;
    struct list_head node;
    enum dma_data_direction direction;
    struct pl08x_bus_data srcbus;
    struct pl08x_bus_data dstbus;
    int len;
    dma_addr_t llis_bus;
    void *llis_va;
    struct pl08x_channel_data *cd;
    bool active;
    /*
     * Settings to be put into the physical channel when we
     * trigger this txd
     */
    u32 csrc;
    u32 cdst;
    u32 clli;
    u32 cctl;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual
 * channel states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
    PL08X_CHAN_IDLE,
    PL08X_CHAN_RUNNING,
    PL08X_CHAN_PAUSED,
    PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrapped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @runtime_direction: current direction of this channel according to
 * runtime config
 * @lc: last completed transaction on this channel
 * @desc_list: queued transactions pending on this channel
 * @at: active transaction on this channel
 * @lockflags: sometimes we let a lock last between two function calls,
 * especially prep/submit, and then we need to store the IRQ flags
 * in the channel state, here
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @waiting: a TX descriptor on this channel which is waiting for
 * a physical channel to become available
 */
struct pl08x_dma_chan {
    struct dma_chan chan;
    struct pl08x_phy_chan *phychan;
    struct tasklet_struct tasklet;
    char *name;
    struct pl08x_channel_data *cd;
    dma_addr_t runtime_addr;
    enum dma_data_direction runtime_direction;
    atomic_t last_issued;
    dma_cookie_t lc;
    struct list_head desc_list;
    struct pl08x_txd *at;
    unsigned long lockflags;
    spinlock_t lock;
    void *host;
    enum pl08x_dma_chan_state state;
    bool slave;
    struct pl08x_txd *waiting;
};

/**
 * struct pl08x_platform_data - the platform configuration for the
 * PL08x PrimeCells.
 * @slave_channels: the channels defined for the different devices on the
 * platform, all inclusive, including multiplexed channels. The available
 * physical channels will be multiplexed around these signals as they
 * are requested, just enumerate all possible channels.
 * @get_signal: request a physical signal to be used for a DMA
 * transfer immediately: if there is some multiplexing or similar blocking
 * the use of the channel the transfer can be denied by returning
 * less than zero, else it returns the allocated signal number
 * @put_signal: indicate to the platform that this physical signal is not
 * running any DMA transfer and multiplexing can be recycled
 * @bus_bit_lli: Bit[0] of the address indicates which AHB bus master the
 * LLI addresses are on 0/1 Master 1/2.
 */
struct pl08x_platform_data {
    struct pl08x_channel_data *slave_channels;
    unsigned int num_slave_channels;
    struct pl08x_channel_data memcpy_channel;
    int (*get_signal)(struct pl08x_dma_chan *);
    void (*put_signal)(struct pl08x_dma_chan *);
};

#ifdef CONFIG_AMBA_PL08X
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
#else
static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
    return false;
}
#endif

#endif /* AMBA_PL08X_H */
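A board file would wire this up roughly as follows; every name, signal number, mux hook and address below is invented for illustration:

/* Sketch: platform data for one UART TX channel plus a memcpy channel. */
static int board_get_signal(struct pl08x_dma_chan *ch);
static void board_put_signal(struct pl08x_dma_chan *ch);

static struct pl08x_channel_data board_slave_channels[] = {
    {
        .bus_id = "uart0_tx",
        .min_signal = 4,    /* statically assigned signal */
        .max_signal = 4,    /* same as min_signal: no muxing */
        .addr = 0x80120000, /* hypothetical UART FIFO address */
    },
};

static struct pl08x_platform_data board_pl08x_data = {
    .slave_channels = board_slave_channels,
    .num_slave_channels = ARRAY_SIZE(board_slave_channels),
    .memcpy_channel = {
        .bus_id = "memcpy",
    },
    .get_signal = board_get_signal, /* hypothetical mux hooks */
    .put_signal = board_put_signal,
};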
@@ -64,13 +64,15 @@ enum dma_transaction_type {
    DMA_PQ_VAL,
    DMA_MEMSET,
    DMA_INTERRUPT,
    DMA_SG,
    DMA_PRIVATE,
    DMA_ASYNC_TX,
    DMA_SLAVE,
    DMA_CYCLIC,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)


/**

@@ -119,12 +121,15 @@ enum dma_ctrl_flags {
 * configuration data in statically from the platform). An additional
 * argument of struct dma_slave_config must be passed in with this
 * command.
 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
 * into external start mode.
 */
enum dma_ctrl_cmd {
    DMA_TERMINATE_ALL,
    DMA_PAUSE,
    DMA_RESUME,
    DMA_SLAVE_CONFIG,
    FSLDMA_EXTERNAL_START,
};

/**

@@ -422,6 +427,9 @@ struct dma_tx_state {
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 * The function takes a buffer of size buf_len. The callback function will
 * be called after period_len bytes have been transferred.
 * @device_control: manipulate all pending operations on a channel, returns
 * zero or error code
 * @device_tx_status: poll for transaction completion, the optional

@@ -473,11 +481,19 @@ struct dma_device {
            unsigned long flags);
    struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
            struct dma_chan *chan, unsigned long flags);
    struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
            struct dma_chan *chan,
            struct scatterlist *dst_sg, unsigned int dst_nents,
            struct scatterlist *src_sg, unsigned int src_nents,
            unsigned long flags);

    struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
            struct dma_chan *chan, struct scatterlist *sgl,
            unsigned int sg_len, enum dma_data_direction direction,
            unsigned long flags);
    struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
            struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
            size_t period_len, enum dma_data_direction direction);
    int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
            unsigned long arg);


@@ -487,6 +503,40 @@ struct dma_device {
    void (*device_issue_pending)(struct dma_chan *chan);
};

static inline int dmaengine_device_control(struct dma_chan *chan,
                                           enum dma_ctrl_cmd cmd,
                                           unsigned long arg)
{
    return chan->device->device_control(chan, cmd, arg);
}

static inline int dmaengine_slave_config(struct dma_chan *chan,
                                         struct dma_slave_config *config)
{
    return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
                                    (unsigned long)config);
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
    return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
    return dmaengine_device_control(chan, DMA_PAUSE, 0);
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
    return dmaengine_device_control(chan, DMA_RESUME, 0);
}

static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
    return desc->tx_submit(desc);
}
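Together these wrappers give slave clients a uniform call sequence. A typical RX setup (a sketch against the API of this kernel generation; the FIFO address, widths and burst size are placeholders) chains them like so:

/* Sketch: configure a slave channel, queue one scatter-gather RX, fire. */
static int example_slave_rx(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, dma_addr_t fifo_addr)
{
    struct dma_slave_config cfg = {
        .direction = DMA_FROM_DEVICE,
        .src_addr = fifo_addr,
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .src_maxburst = 8,
    };
    struct dma_async_tx_descriptor *desc;
    int ret;

    ret = dmaengine_slave_config(chan, &cfg);   /* DMA_SLAVE_CONFIG */
    if (ret)
        return ret;

    desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
    if (!desc)
        return -EBUSY;

    dmaengine_submit(desc);         /* hand to the driver's queue */
    dma_async_issue_pending(chan);  /* start the hardware */
    return 0;
}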
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
    size_t mask;

@@ -548,7 +598,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
    return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
    return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
@@ -27,14 +27,7 @@

#include <linux/dmaengine.h>

/*DMA transaction width, src and dstn width would be same
The DMA length must be width aligned,
for 32 bit width the length must be 32 bit (4bytes) aligned only*/
enum intel_mid_dma_width {
    LNW_DMA_WIDTH_8BIT = 0x0,
    LNW_DMA_WIDTH_16BIT = 0x1,
    LNW_DMA_WIDTH_32BIT = 0x2,
};
#define DMA_PREP_CIRCULAR_LIST (1 << 10)

/*DMA mode configurations*/
enum intel_mid_dma_mode {

@@ -69,18 +62,15 @@ enum intel_mid_dma_msize {
 * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
 * @src_msize: Source DMA burst size
 * @dst_msize: Dst DMA burst size
 * @per_addr: Peripheral address
 * @device_instance: DMA peripheral device instance, we can have multiple
 * peripheral device connected to single DMAC
 */
struct intel_mid_dma_slave {
    enum dma_data_direction     dirn;
    enum intel_mid_dma_width    src_width; /*width of DMA src txn*/
    enum intel_mid_dma_width    dst_width; /*width of DMA dst txn*/
    enum intel_mid_dma_hs_mode  hs_mode; /*handshaking*/
    enum intel_mid_dma_mode     cfg_mode; /*mode configuration*/
    enum intel_mid_dma_msize    src_msize; /*size of src burst*/
    enum intel_mid_dma_msize    dst_msize; /*size of dst burst*/
    unsigned int    device_instance; /*0, 1 for peripheral instance*/
    struct dma_slave_config     dma_slave;
};

#endif /*__INTEL_MID_DMA_H__*/
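After this change the MID-specific knobs ride along with a standard dma_slave_config embedded in the same structure. A peripheral driver would fill it roughly as follows (a hedged sketch; the device and every field value are illustrative only):

/* Sketch: slave parameters for a hypothetical receive channel on
 * DMAC instance 0, using the enums kept in this header. */
static struct intel_mid_dma_slave example_mid_slave = {
    .hs_mode = LNW_DMA_HW_HS,       /* hardware handshaking */
    .cfg_mode = LNW_DMA_PER_TO_MEM, /* peripheral-to-memory */
    .device_instance = 0,
    .dma_slave = {
        .direction = DMA_FROM_DEVICE,
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .src_maxburst = 16,
    },
};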