131 lines
4.1 KiB
Diff
131 lines
4.1 KiB
Diff
|
From caf38f4702a75c7ba13d5d80d902812c5faa8501 Mon Sep 17 00:00:00 2001
|
||
|
From: Joel Fernandes <joelf@ti.com>
|
||
|
Date: Thu, 27 Jun 2013 20:18:52 -0500
|
||
|
Subject: [PATCH 10/13] DMA: EDMA: Add support for Cyclic DMA
|
||
|
|
||
|
Using the PaRAM configuration function that we split for reuse by the
|
||
|
different DMA types, we implement Cyclic DMA support.
|
||
|
For the cyclic case, we pass different configuration parameters to this
|
||
|
function, and add all the Cyclic-specific code separately.
|
||
|
Callbacks are handled transparently as usual by the virt-dma layer.
|
||
|
Linking is handled the same way as the slave SG case.
|
||
|
|
||
|
Signed-off-by: Joel Fernandes <joelf@ti.com>
|
||
|
---
|
||
|
drivers/dma/edma.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||
|
1 file changed, 85 insertions(+)
|
||
|
|
||
|
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
|
||
|
index 87b7e2b..cec9a12 100644
|
||
|
--- a/drivers/dma/edma.c
|
||
|
+++ b/drivers/dma/edma.c
|
||
|
@@ -321,6 +321,88 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
|
||
|
return absync;
|
||
|
}
|
||
|
|
||
|
+static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
|
||
|
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
||
|
+ size_t period_len, enum dma_transfer_direction direction, unsigned long flags,
|
||
|
+ void *context)
|
||
|
+{
|
||
|
+ struct edma_chan *echan = to_edma_chan(chan);
|
||
|
+ struct device *dev = chan->device->dev;
|
||
|
+ struct edma_desc *edesc;
|
||
|
+ dma_addr_t src_addr, dst_addr;
|
||
|
+ enum dma_slave_buswidth dev_width;
|
||
|
+ u32 burst;
|
||
|
+ int i, ret, nr_periods;
|
||
|
+
|
||
|
+ if (unlikely(!echan || !buf_len || !period_len))
|
||
|
+ return NULL;
|
||
|
+
|
||
|
+ if (direction == DMA_DEV_TO_MEM) {
|
||
|
+ src_addr = echan->cfg.src_addr;
|
||
|
+ dst_addr = buf_addr;
|
||
|
+ dev_width = echan->cfg.src_addr_width;
|
||
|
+ burst = echan->cfg.src_maxburst;
|
||
|
+ } else if (direction == DMA_MEM_TO_DEV) {
|
||
|
+ src_addr = buf_addr;
|
||
|
+ dst_addr = echan->cfg.dst_addr;
|
||
|
+ dev_width = echan->cfg.dst_addr_width;
|
||
|
+ burst = echan->cfg.dst_maxburst;
|
||
|
+ } else {
|
||
|
+ dev_err(dev, "%s: bad direction?\n", __func__);
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+
|
||
|
+ if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
|
||
|
+ dev_err(dev, "Undefined slave buswidth\n");
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+
|
||
|
+ if(unlikely(period_len % buf_len)) {
|
||
|
+ dev_err(dev, "Period should be multiple of Buf length\n");
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+
|
||
|
+ nr_periods = period_len / buf_len;
|
||
|
+
|
||
|
+ edesc = kzalloc(sizeof(*edesc) + nr_periods *
|
||
|
+ sizeof(edesc->pset[0]), GFP_ATOMIC);
|
||
|
+ if (!edesc) {
|
||
|
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+
|
||
|
+ edesc->pset_nr = nr_periods;
|
||
|
+
|
||
|
+ for(i = 0; i < nr_periods; i++) {
|
||
|
+ /* Allocate a PaRAM slot, if needed */
|
||
|
+ if (echan->slot[i] < 0) {
|
||
|
+ echan->slot[i] =
|
||
|
+ edma_alloc_slot(EDMA_CTLR(echan->ch_num),
|
||
|
+ EDMA_SLOT_ANY);
|
||
|
+ if (echan->slot[i] < 0) {
|
||
|
+ dev_err(dev, "Failed to allocate slot\n");
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+ }
|
||
|
+
|
||
|
+ if (direction == DMA_DEV_TO_MEM)
|
||
|
+ dst_addr += period_len;
|
||
|
+ else
|
||
|
+ src_addr += period_len;
|
||
|
+
|
||
|
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr, dst_addr,
|
||
|
+ burst, dev_width, period_len, direction);
|
||
|
+ if(ret < 0)
|
||
|
+ return NULL;
|
||
|
+
|
||
|
+ edesc->absync = ret;
|
||
|
+ if (i == nr_periods - 1)
|
||
|
+ edesc->pset[i].opt |= TCINTEN;
|
||
|
+ }
|
||
|
+ /* TODO: tx_flags (last parameter) needs to be investigated */
|
||
|
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, 0);
|
||
|
+}
|
||
|
+
|
||
|
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
||
|
struct dma_chan *chan, struct scatterlist *sgl,
|
||
|
unsigned int sg_len, enum dma_transfer_direction direction,
|
||
|
@@ -424,6 +506,8 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
|
||
|
edesc = echan->edesc;
|
||
|
if (edesc) {
|
||
|
edma_execute(echan);
|
||
|
+ /* Note: desc->callback must be set up by EDMA users so that
|
||
|
+  * the virt-dma layer calls their callback on vchan_cookie_complete() */
|
||
|
vchan_cookie_complete(&edesc->vdesc);
|
||
|
}
|
||
|
|
||
|
@@ -605,6 +689,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
|
||
|
struct device *dev)
|
||
|
{
|
||
|
dma->device_prep_slave_sg = edma_prep_slave_sg;
|
||
|
+ dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
|
||
|
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
|
||
|
dma->device_free_chan_resources = edma_free_chan_resources;
|
||
|
dma->device_issue_pending = edma_issue_pending;
|
||
|
--
|
||
|
1.8.2.1
|
||
|
|