Diffstat (limited to 'target/linux/sunxi/patches-4.1/160-dmaengine-add-sun4i-driver.patch')
-rw-r--r-- | target/linux/sunxi/patches-4.1/160-dmaengine-add-sun4i-driver.patch | 1371
1 file changed, 0 insertions, 1371 deletions
diff --git a/target/linux/sunxi/patches-4.1/160-dmaengine-add-sun4i-driver.patch b/target/linux/sunxi/patches-4.1/160-dmaengine-add-sun4i-driver.patch
deleted file mode 100644
index 471fb2a..0000000
--- a/target/linux/sunxi/patches-4.1/160-dmaengine-add-sun4i-driver.patch
+++ /dev/null
@@ -1,1371 +0,0 @@
-From 1a28c76f3965775854ed6f6229de457c3d0674ab Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Emilio=20L=C3=B3pez?= <emilio@elopez.com.ar>
-Date: Sat, 4 Apr 2015 11:37:24 +0200
-Subject: [PATCH] dma: sun4i: Add support for the DMA engine on sun[457]i SoCs
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This patch adds support for the DMA engine present on Allwinner A10,
-A13, A10S and A20 SoCs. This engine has two kinds of channels: normal
-and dedicated. The main difference is in the mode of operation;
-while a single normal channel may be operating at any given time,
-dedicated channels may operate simultaneously provided there is no
-overlap of source or destination.
-
-Hardware documentation can be found in the A10 User Manual (section 12),
-A13 User Manual (section 14) and A20 User Manual (section 1.12).
-
-Signed-off-by: Emilio López <emilio@elopez.com.ar>
-Signed-off-by: Hans de Goede <hdegoede@redhat.com>
----
-
-Changes from v4:
- * Fix interrupt triggering after freeing a dma-channel; this fixed
-   the problems with jack
- * Adjust to recent kernel dma API changes
-
-Changes from v3:
- * Drop threaded IRQ to get lower latency
- * Drop chancnt
- * Fix crash on first use when using a DMA-aware bootloader (e.g., one
-   that supports NAND)
-
-Changes from v2:
- * Faster memcpy
- * Quicker cyclic transfers
- * Address some stylistic and locking comments from Maxime
- * probably some more stuff I'm forgetting
-
-Changes from v1:
- * address comments from Chen-Yu and Maxime
- * fix issue converting bus width
- * switch to using a threaded IRQ instead of a tasklet on
-   recommendation from Maxime
- * fix issue setting magic timing parameter for SPI transfers
- * fix an issue with list handling reported by the kbuild 0-DAY robot (thanks!)
- * drop a lot of unused #define
- * probably some more stuff I'm forgetting
----
- .../devicetree/bindings/dma/sun4i-dma.txt |   46 +
- drivers/dma/Kconfig                       |   11 +
- drivers/dma/Makefile                      |    1 +
- drivers/dma/sun4i-dma.c                   | 1235 ++++++++++++++++++++
- 4 files changed, 1293 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/dma/sun4i-dma.txt
- create mode 100644 drivers/dma/sun4i-dma.c
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
-@@ -0,0 +1,46 @@
-+Allwinner A10 DMA Controller
-+
-+This driver follows the generic DMA bindings defined in dma.txt.
-+
-+Required properties:
-+
-+- compatible: Must be "allwinner,sun4i-a10-dma"
-+- reg: Should contain the registers base address and length
-+- interrupts: Should contain a reference to the interrupt used by this device
-+- clocks: Should contain a reference to the parent AHB clock
-+- #dma-cells: Should be 2, the first cell denoting normal or dedicated dma,
-+  the second cell holding the request line number.
-+
-+Example:
-+	dma: dma-controller@01c02000 {
-+		compatible = "allwinner,sun4i-a10-dma";
-+		reg = <0x01c02000 0x1000>;
-+		interrupts = <27>;
-+		clocks = <&ahb_gates 6>;
-+		#dma-cells = <2>;
-+	};
-+
-+Clients:
-+
-+DMA clients connected to the Allwinner A10 DMA controller must use the
-+format described in the dma.txt file, using a three-cell specifier for
-+each channel: a phandle plus two integer cells.
-+
-+The three cells in order are:
-+
-+1. A phandle pointing to the DMA controller.
-+2. Whether it is using normal (0) or dedicated (1) channels
-+3. The port ID as specified in the datasheet
-+
-+Example:
-+	spi2: spi@01c17000 {
-+		compatible = "allwinner,sun4i-a10-spi";
-+		reg = <0x01c17000 0x1000>;
-+		interrupts = <0 12 4>;
-+		clocks = <&ahb_gates 22>, <&spi2_clk>;
-+		clock-names = "ahb", "mod";
-+		dmas = <&dma 1 29>, <&dma 1 28>;
-+		dma-names = "rx", "tx";
-+		status = "disabled";
-+		#address-cells = <1>;
-+		#size-cells = <0>;
-+	};
---- a/drivers/dma/Kconfig
-+++ b/drivers/dma/Kconfig
-@@ -444,6 +444,17 @@ config XGENE_DMA
- 	help
- 	  Enable support for the APM X-Gene SoC DMA engine.
- 
-+config SUN4I_DMA
-+	tristate "Allwinner A10 DMA support"
-+	depends on (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || (COMPILE_TEST && OF && ARM))
-+	default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
-+	select DMA_ENGINE
-+	select DMA_OF
-+	select DMA_VIRTUAL_CHANNELS
-+	help
-+	  Enable support for the DMA controller present in the sun4i,
-+	  sun5i and sun7i Allwinner ARM SoCs.
-+
- config DMA_ENGINE
- 	bool
- 
---- a/drivers/dma/Makefile
-+++ b/drivers/dma/Makefile
-@@ -54,3 +54,4 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
- obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
- obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
- obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
-+obj-$(CONFIG_SUN4I_DMA) += sun4i-dma.o
---- /dev/null
-+++ b/drivers/dma/sun4i-dma.c
-@@ -0,0 +1,1235 @@
-+/*
-+ * Copyright (C) 2014 Emilio López
-+ * Emilio López <emilio@elopez.com.ar>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ */
-+
-+#include <linux/bitmap.h>
-+#include <linux/bitops.h>
-+#include <linux/clk.h>
-+#include <linux/dmaengine.h>
-+#include <linux/dmapool.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/of_dma.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+
-+#include "virt-dma.h"
-+
-+/** Normal DMA register values **/
-+
-+/* Normal DMA source/destination data request type values */
-+#define NDMA_DRQ_TYPE_SDRAM			0x16
-+#define NDMA_DRQ_TYPE_LIMIT			(0x1F + 1)
-+
-+/** Normal DMA register layout **/
-+
-+/* Normal DMA configuration register layout */
-+#define NDMA_CFG_LOADING			BIT(31)
-+#define NDMA_CFG_CONT_MODE			BIT(30)
-+#define NDMA_CFG_WAIT_STATE(n)			((n) << 27)
-+#define NDMA_CFG_DEST_DATA_WIDTH(width)		((width) << 25)
-+#define NDMA_CFG_DEST_BURST_LENGTH(len)		((len) << 23)
-+#define NDMA_CFG_DEST_NON_SECURE		BIT(22)
-+#define NDMA_CFG_DEST_FIXED_ADDR		BIT(21)
-+#define NDMA_CFG_DEST_DRQ_TYPE(type)		((type) << 16)
-+#define NDMA_CFG_BYTE_COUNT_MODE_REMAIN		BIT(15)
-+#define NDMA_CFG_SRC_DATA_WIDTH(width)		((width) << 9)
-+#define NDMA_CFG_SRC_BURST_LENGTH(len)		((len) << 7)
-+#define NDMA_CFG_SRC_NON_SECURE			BIT(6)
-+#define NDMA_CFG_SRC_FIXED_ADDR			BIT(5)
-+#define NDMA_CFG_SRC_DRQ_TYPE(type)		((type) << 0)
-+
-+/** Dedicated DMA register values **/
-+
-+/* Dedicated DMA source/destination address mode values */
-+#define DDMA_ADDR_MODE_LINEAR			0
-+#define DDMA_ADDR_MODE_IO			1
-+#define DDMA_ADDR_MODE_HORIZONTAL_PAGE		2
-+#define DDMA_ADDR_MODE_VERTICAL_PAGE		3
-+
-+/* Dedicated DMA source/destination data request type values */
-+#define DDMA_DRQ_TYPE_SDRAM			0x1
-+#define DDMA_DRQ_TYPE_LIMIT			(0x1F + 1)
-+
-+/** Dedicated DMA register layout **/
-+
-+/* Dedicated DMA configuration register layout */
-+#define DDMA_CFG_LOADING			BIT(31)
-+#define DDMA_CFG_BUSY				BIT(30)
-+#define DDMA_CFG_CONT_MODE			BIT(29)
-+#define DDMA_CFG_DEST_NON_SECURE		BIT(28)
-+#define DDMA_CFG_DEST_DATA_WIDTH(width)		((width) << 25)
-+#define DDMA_CFG_DEST_BURST_LENGTH(len)		((len) << 23)
-+#define DDMA_CFG_DEST_ADDR_MODE(mode)		((mode) << 21)
-+#define DDMA_CFG_DEST_DRQ_TYPE(type)		((type) << 16)
-+#define DDMA_CFG_BYTE_COUNT_MODE_REMAIN		BIT(15)
-+#define DDMA_CFG_SRC_NON_SECURE			BIT(12)
-+#define DDMA_CFG_SRC_DATA_WIDTH(width)		((width) << 9)
-+#define DDMA_CFG_SRC_BURST_LENGTH(len)		((len) << 7)
-+#define DDMA_CFG_SRC_ADDR_MODE(mode)		((mode) << 5)
-+#define DDMA_CFG_SRC_DRQ_TYPE(type)		((type) << 0)
-+
-+/* Dedicated DMA parameter register layout */
-+#define DDMA_PARA_DEST_DATA_BLK_SIZE(n)		(((n) - 1) << 24)
-+#define DDMA_PARA_DEST_WAIT_CYCLES(n)		(((n) - 1) << 16)
-+#define DDMA_PARA_SRC_DATA_BLK_SIZE(n)		(((n) - 1) << 8)
-+#define DDMA_PARA_SRC_WAIT_CYCLES(n)		(((n) - 1) << 0)
-+
-+/** DMA register offsets **/
-+
-+/* General register offsets */
-+#define DMA_IRQ_ENABLE_REG			0x0
-+#define DMA_IRQ_PENDING_STATUS_REG		0x4
-+
-+/* Normal DMA register offsets */
-+#define NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
-+#define NDMA_CFG_REG				0x0
-+#define NDMA_SRC_ADDR_REG			0x4
-+#define NDMA_DEST_ADDR_REG			0x8
-+#define NDMA_BYTE_COUNT_REG			0xC
-+
-+/* Dedicated DMA register offsets */
-+#define DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
-+#define DDMA_CFG_REG				0x0
-+#define DDMA_SRC_ADDR_REG			0x4
-+#define DDMA_DEST_ADDR_REG			0x8
-+#define DDMA_BYTE_COUNT_REG			0xC
-+#define DDMA_PARA_REG				0x18
-+
-+/** DMA Driver **/
-+
-+/*
-+ * Normal DMA has 8 channels, and Dedicated DMA has another 8, so that's
-+ * 16 channels. As for endpoints, there are 29 and 21 respectively. Given
-+ * that the Normal DMA endpoints (other than SDRAM) can be used as tx/rx,
-+ * we need 78 vchans in total
-+ */
-+#define NDMA_NR_MAX_CHANNELS	8
-+#define DDMA_NR_MAX_CHANNELS	8
-+#define DMA_NR_MAX_CHANNELS	(NDMA_NR_MAX_CHANNELS + DDMA_NR_MAX_CHANNELS)
-+#define NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
-+#define DDMA_NR_MAX_VCHANS	21
-+#define DMA_NR_MAX_VCHANS	(NDMA_NR_MAX_VCHANS + DDMA_NR_MAX_VCHANS)
-+
-+/* This set of DDMA timing parameters was found experimentally while
-+ * working with the SPI driver and seems to make it behave correctly */
-+#define DDMA_MAGIC_SPI_PARAMETERS	(DDMA_PARA_DEST_DATA_BLK_SIZE(1) |	\
-+					 DDMA_PARA_SRC_DATA_BLK_SIZE(1) |	\
-+					 DDMA_PARA_DEST_WAIT_CYCLES(2) |	\
-+					 DDMA_PARA_SRC_WAIT_CYCLES(2))
-+
-+struct sun4i_dma_pchan {
-+	/* Register base of channel */
-+	void __iomem *base;
-+	/* vchan currently being serviced */
-+	struct sun4i_dma_vchan *vchan;
-+	/* Is this a dedicated pchan? */
-+	int is_dedicated;
-+};
-+
-+struct sun4i_dma_vchan {
-+	struct virt_dma_chan vc;
-+	struct dma_slave_config cfg;
-+	struct sun4i_dma_pchan *pchan;
-+	struct sun4i_dma_promise *processing;
-+	struct sun4i_dma_contract *contract;
-+	u8 endpoint;
-+	int is_dedicated;
-+};
-+
-+struct sun4i_dma_promise {
-+	u32 cfg;
-+	u32 para;
-+	dma_addr_t src;
-+	dma_addr_t dst;
-+	size_t len;
-+	struct list_head list;
-+};
-+
-+/* A contract is a set of promises */
-+struct sun4i_dma_contract {
-+	struct virt_dma_desc vd;
-+	struct list_head demands;
-+	struct list_head completed_demands;
-+	int is_cyclic;
-+};
-+
-+struct sun4i_dma_dev {
-+	DECLARE_BITMAP(pchans_used, DMA_NR_MAX_CHANNELS);
-+	struct dma_device slave;
-+	struct sun4i_dma_pchan *pchans;
-+	struct sun4i_dma_vchan *vchans;
-+	void __iomem *base;
-+	struct clk *clk;
-+	int irq;
-+	spinlock_t lock;
-+};
-+
-+static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
-+{
-+	return container_of(dev, struct sun4i_dma_dev, slave);
-+}
-+
-+static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
-+{
-+	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
-+}
-+
-+static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
-+{
-+	return container_of(vd, struct sun4i_dma_contract, vd);
-+}
-+
-+static struct device *chan2dev(struct dma_chan *chan)
-+{
-+	return &chan->dev->device;
-+}
-+
-+static int convert_burst(u32 maxburst)
-+{
-+	if (maxburst > 8)
-+		return -EINVAL;
-+
-+	/* 1 -> 0, 4 -> 1, 8 -> 2 */
-+	return (maxburst >> 2);
-+}
-+
-+static int convert_buswidth(enum dma_slave_buswidth addr_width)
-+{
-+	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
-+		return -EINVAL;
-+
-+	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
-+	return (addr_width >> 1);
-+}
-+
-+static int choose_optimal_buswidth(dma_addr_t addr)
-+{
-+	/* On 32 bit aligned addresses, we can use a 32 bit bus width */
-+	if (addr % 4 == 0)
-+		return DMA_SLAVE_BUSWIDTH_4_BYTES;
-+	/* On 16 bit aligned addresses, we can use a 16 bit bus width */
-+	else if (addr % 2 == 0)
-+		return DMA_SLAVE_BUSWIDTH_2_BYTES;
-+
-+	/* Worst-case scenario, we need to do byte aligned reads */
-+	return DMA_SLAVE_BUSWIDTH_1_BYTE;
-+}
-+
-+static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
-+{
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+
-+	vchan_free_chan_resources(&vchan->vc);
-+}
-+
-+static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
-+						  struct sun4i_dma_vchan *vchan)
-+{
-+	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
-+	unsigned long flags;
-+	int i, max;
-+
-+	/*
-+	 * pchans 0-NDMA_NR_MAX_CHANNELS are normal, and
-+	 * NDMA_NR_MAX_CHANNELS+ are dedicated ones
-+	 */
-+	if (vchan->is_dedicated) {
-+		i = NDMA_NR_MAX_CHANNELS;
-+		max = DMA_NR_MAX_CHANNELS;
-+	} else {
-+		i = 0;
-+		max = NDMA_NR_MAX_CHANNELS;
-+	}
-+
-+	spin_lock_irqsave(&priv->lock, flags);
-+	for_each_clear_bit_from(i, priv->pchans_used, max) {
-+		pchan = &pchans[i];
-+		pchan->vchan = vchan;
-+		set_bit(i, priv->pchans_used);
-+		break;
-+	}
-+	spin_unlock_irqrestore(&priv->lock, flags);
-+
-+	return pchan;
-+}
-+
-+static void release_pchan(struct sun4i_dma_dev *priv,
-+			  struct sun4i_dma_pchan *pchan)
-+{
-+	unsigned long flags;
-+	int nr = pchan - priv->pchans;
-+
-+	spin_lock_irqsave(&priv->lock, flags);
-+
-+	pchan->vchan = NULL;
-+	clear_bit(nr, priv->pchans_used);
-+
-+	spin_unlock_irqrestore(&priv->lock, flags);
-+}
-+
-+static void configure_pchan(struct sun4i_dma_pchan *pchan,
-+			    struct sun4i_dma_promise *d)
-+{
-+	/*
-+	 * Configure addresses and misc parameters depending on type;
-+	 * DDMA has an extra field with timing parameters
-+	 */
-+	if (pchan->is_dedicated) {
-+		writel_relaxed(d->src, pchan->base + DDMA_SRC_ADDR_REG);
-+		writel_relaxed(d->dst, pchan->base + DDMA_DEST_ADDR_REG);
-+		writel_relaxed(d->len, pchan->base + DDMA_BYTE_COUNT_REG);
-+		writel_relaxed(d->para, pchan->base + DDMA_PARA_REG);
-+		writel_relaxed(d->cfg, pchan->base + DDMA_CFG_REG);
-+	} else {
-+		writel_relaxed(d->src, pchan->base + NDMA_SRC_ADDR_REG);
-+		writel_relaxed(d->dst, pchan->base + NDMA_DEST_ADDR_REG);
-+		writel_relaxed(d->len, pchan->base + NDMA_BYTE_COUNT_REG);
-+		writel_relaxed(d->cfg, pchan->base + NDMA_CFG_REG);
-+	}
-+}
-+
-+static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
-+				struct sun4i_dma_pchan *pchan,
-+				int half, int end)
-+{
-+	u32 reg;
-+	int pchan_number = pchan - priv->pchans;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&priv->lock, flags);
-+
-+	reg = readl_relaxed(priv->base + DMA_IRQ_ENABLE_REG);
-+
-+	if (half)
-+		reg |= BIT(pchan_number * 2);
-+	else
-+		reg &= ~BIT(pchan_number * 2);
-+
-+	if (end)
-+		reg |= BIT(pchan_number * 2 + 1);
-+	else
-+		reg &= ~BIT(pchan_number * 2 + 1);
-+
-+	writel_relaxed(reg, priv->base + DMA_IRQ_ENABLE_REG);
-+
-+	spin_unlock_irqrestore(&priv->lock, flags);
-+}
-+
-+/**
-+ * Execute pending operations on a vchan
-+ *
-+ * When given a vchan, this function will try to acquire a suitable
-+ * pchan and, if successful, will configure it to fulfill a promise
-+ * from the next pending contract.
-+ *
-+ * This function must be called with &vchan->vc.lock held.
-+ */
-+static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
-+				   struct sun4i_dma_vchan *vchan)
-+{
-+	struct sun4i_dma_promise *promise = NULL;
-+	struct sun4i_dma_contract *contract = NULL;
-+	struct sun4i_dma_pchan *pchan;
-+	struct virt_dma_desc *vd;
-+	int ret;
-+
-+	lockdep_assert_held(&vchan->vc.lock);
-+
-+	/* We need a pchan to do anything, so secure one if available */
-+	pchan = find_and_use_pchan(priv, vchan);
-+	if (!pchan)
-+		return -EBUSY;
-+
-+	/*
-+	 * Channel endpoints must not be repeated, so if this vchan
-+	 * has already submitted some work, we can't do anything else
-+	 */
-+	if (vchan->processing) {
-+		dev_dbg(chan2dev(&vchan->vc.chan),
-+			"processing something to this endpoint already\n");
-+		ret = -EBUSY;
-+		goto release_pchan;
-+	}
-+
-+	do {
-+		/* Figure out which contract we're working with today */
-+		vd = vchan_next_desc(&vchan->vc);
-+		if (!vd) {
-+			dev_dbg(chan2dev(&vchan->vc.chan),
-+				"No pending contract found");
-+			ret = 0;
-+			goto release_pchan;
-+		}
-+
-+		contract = to_sun4i_dma_contract(vd);
-+		if (list_empty(&contract->demands)) {
-+			/* The contract has been completed so mark it as such */
-+			list_del(&contract->vd.node);
-+			vchan_cookie_complete(&contract->vd);
-+			dev_dbg(chan2dev(&vchan->vc.chan),
-+				"Empty contract found and marked complete");
-+		}
-+	} while (list_empty(&contract->demands));
-+
-+	/* Now find out what we need to do */
-+	promise = list_first_entry(&contract->demands,
-+				   struct sun4i_dma_promise, list);
-+	vchan->processing = promise;
-+
-+	/* ... and make it reality */
-+	if (promise) {
-+		vchan->contract = contract;
-+		vchan->pchan = pchan;
-+		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
-+		configure_pchan(pchan, promise);
-+	}
-+
-+	return 0;
-+
-+release_pchan:
-+	release_pchan(priv, pchan);
-+	return ret;
-+}
-+
-+/**
-+ * Generate a promise, to be used in a normal DMA contract.
-+ *
-+ * A NDMA promise contains all the information required to program the
-+ * normal part of the DMA Engine and get data copied. A non-executed
-+ * promise will live in the demands list on a contract. Once it has been
-+ * completed, it will be moved to the completed demands list for later freeing.
-+ * All linked promises will be freed when the corresponding contract is freed
-+ */
-+static struct sun4i_dma_promise *
-+generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
-+		      size_t len, struct dma_slave_config *sconfig)
-+{
-+	struct sun4i_dma_promise *promise;
-+	int ret;
-+
-+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
-+	if (!promise)
-+		return NULL;
-+
-+	promise->src = src;
-+	promise->dst = dest;
-+	promise->len = len;
-+	promise->cfg = NDMA_CFG_LOADING | NDMA_CFG_BYTE_COUNT_MODE_REMAIN;
-+
-+	/* Use sensible default values if client is using undefined ones */
-+	if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
-+		sconfig->src_addr_width = sconfig->dst_addr_width;
-+	if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
-+		sconfig->dst_addr_width = sconfig->src_addr_width;
-+	if (sconfig->src_maxburst == 0)
-+		sconfig->src_maxburst = sconfig->dst_maxburst;
-+	if (sconfig->dst_maxburst == 0)
-+		sconfig->dst_maxburst = sconfig->src_maxburst;
-+
-+	dev_dbg(chan2dev(chan),
-+		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
-+		sconfig->src_maxburst, sconfig->dst_maxburst,
-+		sconfig->src_addr_width, sconfig->dst_addr_width);
-+
-+	/* Source burst */
-+	ret = convert_burst(sconfig->src_maxburst);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= NDMA_CFG_SRC_BURST_LENGTH(ret);
-+
-+	/* Destination burst */
-+	ret = convert_burst(sconfig->dst_maxburst);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= NDMA_CFG_DEST_BURST_LENGTH(ret);
-+
-+	/* Source bus width */
-+	ret = convert_buswidth(sconfig->src_addr_width);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= NDMA_CFG_SRC_DATA_WIDTH(ret);
-+
-+	/* Destination bus width */
-+	ret = convert_buswidth(sconfig->dst_addr_width);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= NDMA_CFG_DEST_DATA_WIDTH(ret);
-+
-+	return promise;
-+
-+fail:
-+	kfree(promise);
-+	return NULL;
-+}
-+
-+/**
-+ * Generate a promise, to be used in a dedicated DMA contract.
-+ *
-+ * A DDMA promise contains all the information required to program the
-+ * Dedicated part of the DMA Engine and get data copied. A non-executed
-+ * promise will live in the demands list on a contract. Once it has been
-+ * completed, it will be moved to the completed demands list for later freeing.
-+ * All linked promises will be freed when the corresponding contract is freed
-+ */
-+static struct sun4i_dma_promise *
-+generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
-+		      size_t len, struct dma_slave_config *sconfig)
-+{
-+	struct sun4i_dma_promise *promise;
-+	int ret;
-+
-+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
-+	if (!promise)
-+		return NULL;
-+
-+	promise->src = src;
-+	promise->dst = dest;
-+	promise->len = len;
-+	promise->cfg = DDMA_CFG_LOADING | DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
-+
-+	/* Source burst */
-+	ret = convert_burst(sconfig->src_maxburst);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= DDMA_CFG_SRC_BURST_LENGTH(ret);
-+
-+	/* Destination burst */
-+	ret = convert_burst(sconfig->dst_maxburst);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= DDMA_CFG_DEST_BURST_LENGTH(ret);
-+
-+	/* Source bus width */
-+	ret = convert_buswidth(sconfig->src_addr_width);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= DDMA_CFG_SRC_DATA_WIDTH(ret);
-+
-+	/* Destination bus width */
-+	ret = convert_buswidth(sconfig->dst_addr_width);
-+	if (IS_ERR_VALUE(ret))
-+		goto fail;
-+	promise->cfg |= DDMA_CFG_DEST_DATA_WIDTH(ret);
-+
-+	return promise;
-+
-+fail:
-+	kfree(promise);
-+	return NULL;
-+}
-+
-+/**
-+ * Generate a contract
-+ *
-+ * Contracts function as DMA descriptors. As our hardware does not support
-+ * linked lists, we need to implement SG via software. We use a contract
-+ * to hold all the pieces of the request and process them serially one
-+ * after another. Each piece is represented as a promise.
-+ */
-+static struct sun4i_dma_contract *generate_dma_contract(void)
-+{
-+	struct sun4i_dma_contract *contract;
-+
-+	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
-+	if (!contract)
-+		return NULL;
-+
-+	INIT_LIST_HEAD(&contract->demands);
-+	INIT_LIST_HEAD(&contract->completed_demands);
-+
-+	return contract;
-+}
-+
-+/**
-+ * Get next promise on a cyclic transfer
-+ *
-+ * Cyclic contracts contain a series of promises which are executed on a
-+ * loop. This function returns the next promise from a cyclic contract,
-+ * so it can be programmed into the hardware.
-+ */
-+static struct sun4i_dma_promise *
-+get_next_cyclic_promise(struct sun4i_dma_contract *contract)
-+{
-+	struct sun4i_dma_promise *promise;
-+
-+	promise = list_first_entry_or_null(&contract->demands,
-+					   struct sun4i_dma_promise, list);
-+	if (!promise) {
-+		list_splice_init(&contract->completed_demands,
-+				 &contract->demands);
-+		promise = list_first_entry(&contract->demands,
-+					   struct sun4i_dma_promise, list);
-+	}
-+
-+	return promise;
-+}
-+
-+/**
-+ * Free a contract and all its associated promises
-+ */
-+static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
-+{
-+	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-+	struct sun4i_dma_promise *promise, *tmp;
-+
-+	/* Free all the demands and completed demands */
-+	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
-+		kfree(promise);
-+
-+	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
-+		kfree(promise);
-+
-+	kfree(contract);
-+}
-+
-+static struct dma_async_tx_descriptor *
-+sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
-+			  dma_addr_t src, size_t len, unsigned long flags)
-+{
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+	struct dma_slave_config *sconfig = &vchan->cfg;
-+	struct sun4i_dma_promise *promise;
-+	struct sun4i_dma_contract *contract;
-+
-+	contract = generate_dma_contract();
-+	if (!contract)
-+		return NULL;
-+
-+	/*
-+	 * We can only do the copy to bus aligned addresses, so
-+	 * choose the best one so we get decent performance. We also
-+	 * maximize the burst size for this same reason.
-+	 */
-+	sconfig->src_addr_width = choose_optimal_buswidth(src);
-+	sconfig->dst_addr_width = choose_optimal_buswidth(dest);
-+	sconfig->src_maxburst = 8;
-+	sconfig->dst_maxburst = 8;
-+
-+	if (vchan->is_dedicated)
-+		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
-+	else
-+		promise = generate_ndma_promise(chan, src, dest, len, sconfig);
-+
-+	if (!promise) {
-+		kfree(contract);
-+		return NULL;
-+	}
-+
-+	/* Configure memcpy mode */
-+	if (vchan->is_dedicated) {
-+		promise->cfg |= DDMA_CFG_SRC_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
-+				DDMA_CFG_DEST_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM);
-+	} else {
-+		promise->cfg |= NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
-+				NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
-+	}
-+
-+	/* Fill the contract with our only promise */
-+	list_add_tail(&promise->list, &contract->demands);
-+
-+	/* And add it to the vchan */
-+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
-+}
-+
-+static struct dma_async_tx_descriptor *
-+sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
-+			  size_t period_len, enum dma_transfer_direction dir,
-+			  unsigned long flags)
-+{
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+	struct dma_slave_config *sconfig = &vchan->cfg;
-+	struct sun4i_dma_promise *promise;
-+	struct sun4i_dma_contract *contract;
-+	dma_addr_t src, dest;
-+	u32 endpoints;
-+	int nr_periods, offset, plength, i;
-+
-+	if (!is_slave_direction(dir)) {
-+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
-+		return NULL;
-+	}
-+
-+	if (vchan->is_dedicated) {
-+		/*
-+		 * As we are using this just for audio data, we need to use
-+		 * normal DMA. There is nothing stopping us from supporting
-+		 * dedicated DMA here as well, so if a client comes up and
-+		 * requires it, it will be simple to implement it.
-+		 */
-+		dev_err(chan2dev(chan),
-+			"Cyclic transfers are only supported on Normal DMA\n");
-+		return NULL;
-+	}
-+
-+	contract = generate_dma_contract();
-+	if (!contract)
-+		return NULL;
-+
-+	contract->is_cyclic = 1;
-+
-+	/* Figure out the endpoints and the address we need */
-+	if (dir == DMA_MEM_TO_DEV) {
-+		src = buf;
-+		dest = sconfig->dst_addr;
-+		endpoints = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
-+			    NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
-+			    NDMA_CFG_DEST_FIXED_ADDR;
-+	} else {
-+		src = sconfig->src_addr;
-+		dest = buf;
-+		endpoints = NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
-+			    NDMA_CFG_SRC_FIXED_ADDR |
-+			    NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
-+	}
-+
-+	/*
-+	 * We will be using half done interrupts to make two periods
-+	 * out of a promise, so we need to program the DMA engine less
-+	 * often
-+	 */
-+	nr_periods = DIV_ROUND_UP(len / period_len, 2);
-+	for (i = 0; i < nr_periods; i++) {
-+		/* Calculate the offset in the buffer and the length needed */
-+		offset = i * period_len * 2;
-+		plength = min((len - offset), (period_len * 2));
-+		if (dir == DMA_MEM_TO_DEV)
-+			src = buf + offset;
-+		else
-+			dest = buf + offset;
-+
-+		/* Make the promise */
-+		promise = generate_ndma_promise(chan, src, dest,
-+						plength, sconfig);
-+		if (!promise) {
-+			/* TODO: should we free everything? */
-+			return NULL;
-+		}
-+		promise->cfg |= endpoints;
-+
-+		/* Then add it to the contract */
-+		list_add_tail(&promise->list, &contract->demands);
-+	}
-+
-+	/* And add it to the vchan */
-+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
-+}
-+
-+static struct dma_async_tx_descriptor *
-+sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-+			unsigned int sg_len, enum dma_transfer_direction dir,
-+			unsigned long flags, void *context)
-+{
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+	struct dma_slave_config *sconfig = &vchan->cfg;
-+	struct sun4i_dma_promise *promise;
-+	struct sun4i_dma_contract *contract;
-+	struct scatterlist *sg;
-+	dma_addr_t srcaddr, dstaddr;
-+	u32 endpoints, para;
-+	int i;
-+
-+	if (!sgl)
-+		return NULL;
-+
-+	if (!is_slave_direction(dir)) {
-+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
-+		return NULL;
-+	}
-+
-+	contract = generate_dma_contract();
-+	if (!contract)
-+		return NULL;
-+
-+	/* Figure out endpoints */
-+	if (vchan->is_dedicated && dir == DMA_MEM_TO_DEV) {
-+		endpoints = DDMA_CFG_SRC_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
-+			    DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_LINEAR) |
-+			    DDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
-+			    DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_IO);
-+	} else if (!vchan->is_dedicated && dir == DMA_MEM_TO_DEV) {
-+		endpoints = NDMA_CFG_SRC_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM) |
-+			    NDMA_CFG_DEST_DRQ_TYPE(vchan->endpoint) |
-+			    NDMA_CFG_DEST_FIXED_ADDR;
-+	} else if (vchan->is_dedicated) {
-+		endpoints = DDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
-+			    DDMA_CFG_SRC_ADDR_MODE(DDMA_ADDR_MODE_IO) |
-+			    DDMA_CFG_DEST_DRQ_TYPE(DDMA_DRQ_TYPE_SDRAM) |
-+			    DDMA_CFG_DEST_ADDR_MODE(DDMA_ADDR_MODE_LINEAR);
-+	} else {
-+		endpoints = NDMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
-+			    NDMA_CFG_SRC_FIXED_ADDR |
-+			    NDMA_CFG_DEST_DRQ_TYPE(NDMA_DRQ_TYPE_SDRAM);
-+	}
-+
-+	for_each_sg(sgl, sg, sg_len, i) {
-+		/* Figure out addresses */
-+		if (dir == DMA_MEM_TO_DEV) {
-+			srcaddr = sg_dma_address(sg);
-+			dstaddr = sconfig->dst_addr;
-+		} else {
-+			srcaddr = sconfig->src_addr;
-+			dstaddr = sg_dma_address(sg);
-+		}
-+
-+		/*
-+		 * These are the magic DMA engine timings that keep SPI going.
-+		 * I haven't seen any interface on DMAEngine to configure
-+		 * timings, and so far they seem to work for everything we
-+		 * support, so I've kept them here. I don't know if other
-+		 * devices need different timings because, as usual, we only
-+		 * have the "para" bitfield meanings, but no comment on what
-+		 * the values should be when doing a certain operation :|
-+		 */
-+		para = DDMA_MAGIC_SPI_PARAMETERS;
-+
-+		/* And make a suitable promise */
-+		if (vchan->is_dedicated)
-+			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
-+							sg_dma_len(sg), sconfig);
-+		else
-+			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
-+							sg_dma_len(sg), sconfig);
-+
-+		if (!promise)
-+			return NULL; /* TODO: should we free everything? */
-+
-+		promise->cfg |= endpoints;
-+		promise->para = para;
-+
-+		/* Then add it to the contract */
-+		list_add_tail(&promise->list, &contract->demands);
-+	}
-+
-+	/*
-+	 * Once we've got all the promises ready, add the contract
-+	 * to the pending list on the vchan
-+	 */
-+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
-+}
-+
-+static int sun4i_dma_terminate_all(struct dma_chan *chan)
-+{
-+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+	struct sun4i_dma_pchan *pchan = vchan->pchan;
-+	LIST_HEAD(head);
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&vchan->vc.lock, flags);
-+	vchan_get_all_descriptors(&vchan->vc, &head);
-+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
-+
-+	/*
-+	 * Clearing the configuration register will halt the pchan. Interrupts
-+	 * may still trigger, so don't forget to disable them.
-+	 */
-+	if (pchan) {
-+		if (pchan->is_dedicated)
-+			writel(0, pchan->base + DDMA_CFG_REG);
-+		else
-+			writel(0, pchan->base + NDMA_CFG_REG);
-+		set_pchan_interrupt(priv, pchan, 0, 0);
-+		release_pchan(priv, pchan);
-+	}
-+
-+	spin_lock_irqsave(&vchan->vc.lock, flags);
-+	vchan_dma_desc_free_list(&vchan->vc, &head);
-+	/* Clear these so the vchan is usable again */
-+	vchan->processing = NULL;
-+	vchan->pchan = NULL;
-+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
-+
-+	return 0;
-+}
-+
-+static int sun4i_dma_config(struct dma_chan *chan,
-+			    struct dma_slave_config *config)
-+{
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+
-+	memcpy(&vchan->cfg, config, sizeof(*config));
-+
-+	return 0;
-+}
-+
-+static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
-+					   struct of_dma *ofdma)
-+{
-+	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
-+	struct sun4i_dma_vchan *vchan;
-+	struct dma_chan *chan;
-+	u8 is_dedicated = dma_spec->args[0];
-+	u8 endpoint = dma_spec->args[1];
-+
-+	/* Check if type is Normal or Dedicated */
-+	if (is_dedicated != 0 && is_dedicated != 1)
-+		return NULL;
-+
-+	/* Make sure the endpoint looks sane */
-+	if ((is_dedicated && endpoint >= DDMA_DRQ_TYPE_LIMIT) ||
-+	    (!is_dedicated && endpoint >= NDMA_DRQ_TYPE_LIMIT))
-+		return NULL;
-+
-+	chan = dma_get_any_slave_channel(&priv->slave);
-+	if (!chan)
-+		return NULL;
-+
-+	/* Assign the endpoint to the vchan */
-+	vchan = to_sun4i_dma_vchan(chan);
-+	vchan->is_dedicated = is_dedicated;
-+	vchan->endpoint = endpoint;
-+
-+	return chan;
-+}
-+
-+static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
-+					   dma_cookie_t cookie,
-+					   struct dma_tx_state *state)
-+{
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+	struct sun4i_dma_pchan *pchan = vchan->pchan;
-+	struct sun4i_dma_contract *contract;
-+	struct sun4i_dma_promise *promise;
-+	struct virt_dma_desc *vd;
-+	unsigned long flags;
-+	enum dma_status ret;
-+	size_t bytes = 0;
-+
-+	ret = dma_cookie_status(chan, cookie, state);
-+	if (ret == DMA_COMPLETE)
-+		return ret;
-+
-+	spin_lock_irqsave(&vchan->vc.lock, flags);
-+	vd = vchan_find_desc(&vchan->vc, cookie);
-+	if (!vd)
-+		goto exit;
-+	contract = to_sun4i_dma_contract(vd);
-+
-+	list_for_each_entry(promise, &contract->demands, list)
-+		bytes += promise->len;
-+
-+	/*
-+	 * The hardware is configured to return the remaining byte
-+	 * quantity. If possible, replace the first listed element's
-+	 * full size with the actual remaining amount
-+	 */
-+	promise = list_first_entry_or_null(&contract->demands,
-+					   struct sun4i_dma_promise, list);
-+	if (promise && pchan) {
-+		bytes -= promise->len;
-+		if (pchan->is_dedicated)
-+			bytes += readl(pchan->base + DDMA_BYTE_COUNT_REG);
-+		else
-+			bytes += readl(pchan->base + NDMA_BYTE_COUNT_REG);
-+	}
-+
-+exit:
-+
-+	dma_set_residue(state, bytes);
-+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
-+
-+	return ret;
-+}
-+
-+static void sun4i_dma_issue_pending(struct dma_chan *chan)
-+{
-+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
-+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&vchan->vc.lock, flags);
-+
-+	/*
-+	 * If there are pending transactions for this vchan, push one of
-+	 * them into the engine to get the ball rolling.
-+	 */
-+	if (vchan_issue_pending(&vchan->vc))
-+		__execute_vchan_pending(priv, vchan);
-+
-+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
-+}
-+
-+static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
-+{
-+	struct sun4i_dma_dev *priv = dev_id;
-+	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
-+	struct sun4i_dma_vchan *vchan;
-+	struct sun4i_dma_contract *contract;
-+	struct sun4i_dma_promise *promise;
-+	unsigned long pendirq, irqs, disableirqs;
-+	int bit, i, free_room, allow_mitigation = 1;
-+
-+	pendirq = readl_relaxed(priv->base + DMA_IRQ_PENDING_STATUS_REG);
-+
-+handle_pending:
-+
-+	disableirqs = 0;
-+	free_room = 0;
-+
-+	for_each_set_bit(bit, &pendirq, 32) {
-+		pchan = &pchans[bit >> 1];
-+		vchan = pchan->vchan;
-+		if (!vchan) /* a terminated channel may still interrupt */
-+			continue;
-+		contract = vchan->contract;
-+
-+		/*
-+		 * Disable the IRQ and free the pchan if it's an end
-+		 * interrupt (odd bit)
-+		 */
-+		if (bit & 1) {
-+			spin_lock(&vchan->vc.lock);
-+
-+			/*
-+			 * Move the promise into the completed list now that
-+			 * we're done with it
-+			 */
-+			list_del(&vchan->processing->list);
-+			list_add_tail(&vchan->processing->list,
-+				      &contract->completed_demands);
-+
-+			/*
-+			 * Cyclic DMA transfers are special:
-+			 * - There's always something we can dispatch
-+			 * - We need to run the callback
-+			 * - Latency is very important, as this is used by audio
-+			 * We therefore just cycle through the list and dispatch
-+			 * whatever we have here, reusing the pchan. There's
-+			 * no need to run the thread after this.
-+			 *
-+			 * For non-cyclic transfers we need to look around,
-+			 * so we can program some more work, or notify the
-+			 * client that their transfers have been completed.
-+			 */
-+			if (contract->is_cyclic) {
-+				promise = get_next_cyclic_promise(contract);
-+				vchan->processing = promise;
-+				configure_pchan(pchan, promise);
-+				vchan_cyclic_callback(&contract->vd);
-+			} else {
-+				vchan->processing = NULL;
-+				vchan->pchan = NULL;
-+
-+				free_room = 1;
-+				disableirqs |= BIT(bit);
-+				release_pchan(priv, pchan);
-+			}
-+
-+			spin_unlock(&vchan->vc.lock);
-+		} else {
-+			/* Half done interrupt */
-+			if (contract->is_cyclic)
-+				vchan_cyclic_callback(&contract->vd);
-+			else
-+				disableirqs |= BIT(bit);
-+		}
-+	}
-+
-+	/* Disable the IRQs for events we handled */
-+	spin_lock(&priv->lock);
-+	irqs = readl_relaxed(priv->base + DMA_IRQ_ENABLE_REG);
-+	writel_relaxed(irqs & ~disableirqs, priv->base + DMA_IRQ_ENABLE_REG);
-+	spin_unlock(&priv->lock);
-+
-+	/* Writing 1 to the pending field will clear the pending interrupt */
-+	writel_relaxed(pendirq, priv->base + DMA_IRQ_PENDING_STATUS_REG);
-+
-+	/*
-+	 * If a pchan was freed, we may be able to schedule something else,
-+	 * so have a look around
-+	 */
-+	if (free_room) {
-+		for (i = 0; i < DMA_NR_MAX_VCHANS; i++) {
-+			vchan = &priv->vchans[i];
-+			spin_lock(&vchan->vc.lock);
-+			__execute_vchan_pending(priv, vchan);
-+			spin_unlock(&vchan->vc.lock);
-+		}
-+	}
-+
-+	/*
-+	 * Handle newer interrupts if some showed up, but only do it once
-+	 * to avoid too long a loop
-+	 */
-+	if (allow_mitigation) {
-+		pendirq = readl_relaxed(priv->base + DMA_IRQ_PENDING_STATUS_REG);
-+		if (pendirq) {
-+			allow_mitigation = 0;
-+			goto handle_pending;
-+		}
-+	}
-+
-+	return IRQ_HANDLED;
-+}
-+
-+static int sun4i_dma_probe(struct platform_device *pdev)
-+{
-+	struct sun4i_dma_dev *priv;
-+	struct resource *res;
-+	int i, j, ret;
-+
-+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-+	if (!priv)
-+		return -ENOMEM;
-+
-+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+	priv->base = devm_ioremap_resource(&pdev->dev, res);
-+	if (IS_ERR(priv->base))
-+		return PTR_ERR(priv->base);
-+
-+	priv->irq = platform_get_irq(pdev, 0);
-+	if (priv->irq < 0) {
-+		dev_err(&pdev->dev, "Cannot claim IRQ\n");
-+		return priv->irq;
-+	}
-+
-+	priv->clk = devm_clk_get(&pdev->dev, NULL);
-+	if (IS_ERR(priv->clk)) {
-+		dev_err(&pdev->dev, "No clock specified\n");
-+		return PTR_ERR(priv->clk);
-+	}
-+
-+	platform_set_drvdata(pdev, priv);
-+	spin_lock_init(&priv->lock);
-+
-+	dma_cap_zero(priv->slave.cap_mask);
-+	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
-+	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
-+	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
-+	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);
-+
-+	INIT_LIST_HEAD(&priv->slave.channels);
-+	priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources;
-+	priv->slave.device_tx_status = sun4i_dma_tx_status;
-+	priv->slave.device_issue_pending = sun4i_dma_issue_pending;
-+	priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg;
-+	priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy;
-+	priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic;
-+	priv->slave.device_config = sun4i_dma_config;
-+	priv->slave.device_terminate_all = sun4i_dma_terminate_all;
-+	priv->slave.copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
-+	priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
-+				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
-+				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-+	priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
-+				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
-+				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-+	priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
-+				 BIT(DMA_MEM_TO_DEV);
-+	priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-+
-+	priv->slave.dev = &pdev->dev;
-+
-+	priv->pchans = devm_kcalloc(&pdev->dev, DMA_NR_MAX_CHANNELS,
-+				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
-+	priv->vchans = devm_kcalloc(&pdev->dev, DMA_NR_MAX_VCHANS,
-+				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
-+	if (!priv->vchans || !priv->pchans)
-+		return -ENOMEM;
-+
-+	/*
-+	 * [0..NDMA_NR_MAX_CHANNELS) are normal pchans, and
-+	 * [NDMA_NR_MAX_CHANNELS..DMA_NR_MAX_CHANNELS) are dedicated ones
-+	 */
-+	for (i = 0; i < NDMA_NR_MAX_CHANNELS; i++)
-+		priv->pchans[i].base = priv->base + NDMA_CHANNEL_REG_BASE(i);
-+
-+	for (j = 0; i < DMA_NR_MAX_CHANNELS; i++, j++) {
-+		priv->pchans[i].base = priv->base + DDMA_CHANNEL_REG_BASE(j);
-+		priv->pchans[i].is_dedicated = 1;
-+	}
-+
-+	for (i = 0; i < DMA_NR_MAX_VCHANS; i++) {
-+		struct sun4i_dma_vchan *vchan = &priv->vchans[i];
-+
-+		spin_lock_init(&vchan->vc.lock);
-+		vchan->vc.desc_free = sun4i_dma_free_contract;
-+		vchan_init(&vchan->vc, &priv->slave);
-+	}
-+
-+	ret = clk_prepare_enable(priv->clk);
-+	if (ret) {
-+		dev_err(&pdev->dev, "Couldn't enable the clock\n");
-+		return ret;
-+	}
-+
-+	/*
-+	 * Make sure the IRQs are all disabled and accounted for. The bootloader
-+	 * likes to leave these dirty
-+	 */
-+	writel(0, priv->base + DMA_IRQ_ENABLE_REG);
-+	writel(0xFFFFFFFF, priv->base + DMA_IRQ_PENDING_STATUS_REG);
-+
-+	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
-+			       0, dev_name(&pdev->dev), priv);
-+	if (ret) {
-+		dev_err(&pdev->dev, "Cannot request IRQ\n");
-+		goto err_clk_disable;
-+	}
-+
-+	ret = dma_async_device_register(&priv->slave);
-+	if (ret) {
-+		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
-+		goto err_clk_disable;
-+	}
-+
-+	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
-+					 priv);
-+	if (ret) {
-+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
-+		goto err_dma_unregister;
-+	}
-+
-+	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
-+
-+	return 0;
-+
-+err_dma_unregister:
-+	dma_async_device_unregister(&priv->slave);
-+err_clk_disable:
-+	clk_disable_unprepare(priv->clk);
-+	return ret;
-+}
-+
-+static int sun4i_dma_remove(struct platform_device *pdev)
-+{
-+	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
-+
-+	/* Disable IRQ so no more work is scheduled */
-+	disable_irq(priv->irq);
-+
-+	of_dma_controller_free(pdev->dev.of_node);
-+	dma_async_device_unregister(&priv->slave);
-+
-+	clk_disable_unprepare(priv->clk);
-+
-+	return 0;
-+}
-+
-+static struct of_device_id sun4i_dma_match[] = {
-+	{ .compatible = "allwinner,sun4i-a10-dma" },
-+	{ /* sentinel */ },
-+};
-+
-+static struct platform_driver sun4i_dma_driver = {
-+	.probe	= sun4i_dma_probe,
-+	.remove	= sun4i_dma_remove,
-+	.driver	= {
-+		.name		= "sun4i-dma",
-+		.of_match_table	= sun4i_dma_match,
-+	},
-+};
-+
-+module_platform_driver(sun4i_dma_driver);
-+
-+MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
-+MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
-+MODULE_LICENSE("GPL");