272 lines
8.3 KiB
Diff
From 14c120151d8ae2e335dd8728c88d6cd1a52863c8 Mon Sep 17 00:00:00 2001
|
|
From: Joel Fernandes <joelf@ti.com>
|
|
Date: Tue, 25 Jun 2013 09:35:33 -0500
|
|
Subject: [PATCH 09/13] DMA: EDMA: Split out PaRAM set calculations into its
|
|
own function
|
|
|
|
PaRAM set calculation is abstracted into its own function to
|
|
enable better reuse for other DMA cases. Currently it only
|
|
implements the Slave case.
|
|
|
|
This provides a much cleaner abstraction to the internals of the
|
|
PaRAM set. However, any PaRAM attributes that are not common to
|
|
all DMA types must be set separately. This function only calculates
|
|
the most-common attributes.
|
|
|
|
Also added comments clarifying A-sync case calculations.
|
|
|
|
Signed-off-by: Joel Fernandes <joelf@ti.com>
|
|
---
|
|
drivers/dma/edma.c | 197 ++++++++++++++++++++++++++++++++++-------------------
|
|
1 file changed, 126 insertions(+), 71 deletions(-)
|
|
|
|
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
|
|
index e008ed2..87b7e2b 100644
|
|
--- a/drivers/dma/edma.c
|
|
+++ b/drivers/dma/edma.c
|
|
@@ -211,6 +211,116 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|
return ret;
|
|
}
|
|
|
|
+/*
|
|
+ * A clean implementation of a PaRAM set configuration abstraction
|
|
+ * @chan: Channel whose PaRAM set we're configuring
|
|
+ * @src_addr: Source address of the DMA
|
|
+ * @dst_addr: Destination address of the DMA
|
|
+ * @burst: In units of dev_width, how much to send
|
|
+ * @dev_width: How much is the dev_width
|
|
+ * @dma_length: Total length of the DMA transfer
|
|
+ * @direction: Direction of the transfer
|
|
+ */
|
|
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
|
|
+ dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
|
|
+ enum dma_slave_buswidth dev_width, unsigned int dma_length,
|
|
+ enum dma_transfer_direction direction)
|
|
+{
|
|
+ struct edma_chan *echan = to_edma_chan(chan);
|
|
+ struct device *dev = chan->device->dev;
|
|
+ int acnt, bcnt, ccnt, cidx;
|
|
+ int src_bidx, dst_bidx, src_cidx, dst_cidx;
|
|
+ int absync;
|
|
+
|
|
+ acnt = dev_width;
|
|
+ /*
|
|
+ * If the maxburst is equal to the fifo width, use
|
|
+ * A-synced transfers. This allows for large contiguous
|
|
+ * buffer transfers using only one PaRAM set.
|
|
+ */
|
|
+ if (burst == 1) {
|
|
+ absync = false;
|
|
+ /*
|
|
+ * For the A-sync case, bcnt and ccnt are the remainder
|
|
+ * and quotient respectively of the division of:
|
|
+ * (dma_length / acnt) by (SZ_64K -1). This is so
|
|
+ * that in case bcnt over flows, we have ccnt to use.
|
|
+ * Note: In A-sync transfer only, bcntrld is used, but it
|
|
+ * only applies for sg_dma_len(sg) >= SZ_64K.
|
|
+ * In this case, the approach adopted is: bcnt for the
|
|
+ * first frame will be the remainder below. Then for
|
|
+ * every successive frame, bcnt will be SZ_64K-1. This
|
|
+ * is assured as bcntrld = 0xffff at the end of the function.
|
|
+ */
|
|
+ ccnt = dma_length / acnt / (SZ_64K - 1);
|
|
+ bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
|
|
+ /*
|
|
+ * If bcnt is non-zero, we have a remainder and hence an
|
|
+ * extra frame to transfer, so increment ccnt.
|
|
+ */
|
|
+ if (bcnt)
|
|
+ ccnt++;
|
|
+ else
|
|
+ bcnt = SZ_64K - 1;
|
|
+ cidx = acnt;
|
|
+ /*
|
|
+ * If maxburst is greater than the fifo address_width,
|
|
+ * use AB-synced transfers where A count is the fifo
|
|
+ * address_width and B count is the maxburst. In this
|
|
+ * case, we are limited to transfers of C count frames
|
|
+ * of (address_width * maxburst) where C count is limited
|
|
+ * to SZ_64K-1. This places an upper bound on the length
|
|
+ * of an SG segment that can be handled.
|
|
+ */
|
|
+ } else {
|
|
+ absync = true;
|
|
+ bcnt = burst;
|
|
+ ccnt = dma_length / (acnt * bcnt);
|
|
+ if (ccnt > (SZ_64K - 1)) {
|
|
+ dev_err(dev, "Exceeded max SG segment size\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ cidx = acnt * bcnt;
|
|
+ }
|
|
+
|
|
+ if (direction == DMA_MEM_TO_DEV) {
|
|
+ src_bidx = acnt;
|
|
+ src_cidx = cidx;
|
|
+ dst_bidx = 0;
|
|
+ dst_cidx = 0;
|
|
+ } else if (direction == DMA_DEV_TO_MEM) {
|
|
+ src_bidx = 0;
|
|
+ src_cidx = 0;
|
|
+ dst_bidx = acnt;
|
|
+ dst_cidx = cidx;
|
|
+ } else {
|
|
+ dev_err(dev, "%s: direction not implemented yet\n", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
|
|
+ /* Configure A or AB synchronized transfers */
|
|
+ if (absync)
|
|
+ pset->opt |= SYNCDIM;
|
|
+
|
|
+ pset->src = src_addr;
|
|
+ pset->dst = dst_addr;
|
|
+
|
|
+ pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
|
|
+ pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
|
|
+
|
|
+ pset->a_b_cnt = bcnt << 16 | acnt;
|
|
+ pset->ccnt = ccnt;
|
|
+ /*
|
|
+ * Only time when (bcntrld) auto reload is required is for
|
|
+ * A-sync case, and in this case, a requirement of reload value
|
|
+ * of SZ_64K-1 only is assured. 'link' is initially set to NULL
|
|
+ * and then later will be populated by edma_execute.
|
|
+ */
|
|
+ pset->link_bcntrld = 0xffffffff;
|
|
+ return absync;
|
|
+}
|
|
+
|
|
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|
struct dma_chan *chan, struct scatterlist *sgl,
|
|
unsigned int sg_len, enum dma_transfer_direction direction,
|
|
@@ -219,23 +329,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|
struct edma_chan *echan = to_edma_chan(chan);
|
|
struct device *dev = chan->device->dev;
|
|
struct edma_desc *edesc;
|
|
- dma_addr_t dev_addr;
|
|
+ dma_addr_t src_addr = 0, dst_addr = 0;
|
|
enum dma_slave_buswidth dev_width;
|
|
u32 burst;
|
|
struct scatterlist *sg;
|
|
- int i;
|
|
- int acnt, bcnt, ccnt, src, dst, cidx;
|
|
- int src_bidx, dst_bidx, src_cidx, dst_cidx;
|
|
+ int i, ret;
|
|
|
|
if (unlikely(!echan || !sgl || !sg_len))
|
|
return NULL;
|
|
|
|
if (direction == DMA_DEV_TO_MEM) {
|
|
- dev_addr = echan->cfg.src_addr;
|
|
+ src_addr = echan->cfg.src_addr;
|
|
dev_width = echan->cfg.src_addr_width;
|
|
burst = echan->cfg.src_maxburst;
|
|
} else if (direction == DMA_MEM_TO_DEV) {
|
|
- dev_addr = echan->cfg.dst_addr;
|
|
+ dst_addr = echan->cfg.dst_addr;
|
|
dev_width = echan->cfg.dst_addr_width;
|
|
burst = echan->cfg.dst_maxburst;
|
|
} else {
|
|
@@ -263,7 +371,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|
|
|
edesc->pset_nr = sg_len;
|
|
|
|
+ /* Configure PaRAM sets for each SG */
|
|
for_each_sg(sgl, sg, sg_len, i) {
|
|
+ /* Get address for each SG */
|
|
+ if (direction == DMA_DEV_TO_MEM)
|
|
+ dst_addr = sg_dma_address(sg);
|
|
+ else
|
|
+ src_addr = sg_dma_address(sg);
|
|
+
|
|
/* Allocate a PaRAM slot, if needed */
|
|
if (echan->slot[i] < 0) {
|
|
echan->slot[i] =
|
|
@@ -275,76 +390,16 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|
}
|
|
}
|
|
|
|
- acnt = dev_width;
|
|
+ ret = edma_config_pset(chan, &edesc->pset[i], src_addr, dst_addr,
|
|
+ burst, dev_width, sg_dma_len(sg), direction);
|
|
+ if (ret < 0)
|
|
+ return NULL;
|
|
|
|
- /*
|
|
- * If the maxburst is equal to the fifo width, use
|
|
- * A-synced transfers. This allows for large contiguous
|
|
- * buffer transfers using only one PaRAM set.
|
|
- */
|
|
- if (burst == 1) {
|
|
- edesc->absync = false;
|
|
- ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
|
|
- bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
|
|
- if (bcnt)
|
|
- ccnt++;
|
|
- else
|
|
- bcnt = SZ_64K - 1;
|
|
- cidx = acnt;
|
|
- /*
|
|
- * If maxburst is greater than the fifo address_width,
|
|
- * use AB-synced transfers where A count is the fifo
|
|
- * address_width and B count is the maxburst. In this
|
|
- * case, we are limited to transfers of C count frames
|
|
- * of (address_width * maxburst) where C count is limited
|
|
- * to SZ_64K-1. This places an upper bound on the length
|
|
- * of an SG segment that can be handled.
|
|
- */
|
|
- } else {
|
|
- edesc->absync = true;
|
|
- bcnt = burst;
|
|
- ccnt = sg_dma_len(sg) / (acnt * bcnt);
|
|
- if (ccnt > (SZ_64K - 1)) {
|
|
- dev_err(dev, "Exceeded max SG segment size\n");
|
|
- return NULL;
|
|
- }
|
|
- cidx = acnt * bcnt;
|
|
- }
|
|
+ edesc->absync = ret;
|
|
|
|
- if (direction == DMA_MEM_TO_DEV) {
|
|
- src = sg_dma_address(sg);
|
|
- dst = dev_addr;
|
|
- src_bidx = acnt;
|
|
- src_cidx = cidx;
|
|
- dst_bidx = 0;
|
|
- dst_cidx = 0;
|
|
- } else {
|
|
- src = dev_addr;
|
|
- dst = sg_dma_address(sg);
|
|
- src_bidx = 0;
|
|
- src_cidx = 0;
|
|
- dst_bidx = acnt;
|
|
- dst_cidx = cidx;
|
|
- }
|
|
-
|
|
- edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
|
|
- /* Configure A or AB synchronized transfers */
|
|
- if (edesc->absync)
|
|
- edesc->pset[i].opt |= SYNCDIM;
|
|
/* If this is the last set, enable completion interrupt flag */
|
|
if (i == sg_len - 1)
|
|
edesc->pset[i].opt |= TCINTEN;
|
|
-
|
|
- edesc->pset[i].src = src;
|
|
- edesc->pset[i].dst = dst;
|
|
-
|
|
- edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
|
|
- edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
|
|
-
|
|
- edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
|
|
- edesc->pset[i].ccnt = ccnt;
|
|
- edesc->pset[i].link_bcntrld = 0xffffffff;
|
|
-
|
|
}
|
|
|
|
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
|
|
--
|
|
1.8.2.1
|
|
|