From bb255f74290d889b65a563bac7a4be0427fdbec8 Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Thu, 16 Feb 2017 12:25:25 +0100
Subject: ipq806x: add v4.9 support

Signed-off-by: John Crispin
---
 ...08-spi-qup-Fix-DMA-mode-to-work-correctly.patch | 219 +++++++++++++++++++++
 1 file changed, 219 insertions(+)
 create mode 100644 target/linux/ipq806x/patches-4.9/0008-spi-qup-Fix-DMA-mode-to-work-correctly.patch

diff --git a/target/linux/ipq806x/patches-4.9/0008-spi-qup-Fix-DMA-mode-to-work-correctly.patch b/target/linux/ipq806x/patches-4.9/0008-spi-qup-Fix-DMA-mode-to-work-correctly.patch
new file mode 100644
index 0000000..d950446
--- /dev/null
+++ b/target/linux/ipq806x/patches-4.9/0008-spi-qup-Fix-DMA-mode-to-work-correctly.patch
@@ -0,0 +1,219 @@
+From 715d008b67b21fb8bfefaeeefa5b8ddf23777872 Mon Sep 17 00:00:00 2001
+From: Andy Gross
+Date: Tue, 2 Feb 2016 17:00:53 -0600
+Subject: [PATCH 08/37] spi: qup: Fix DMA mode to work correctly
+
+This patch fixes a few issues with the DMA mode. The QUP needs to be
+placed in the run mode before the DMA transactions are executed. The
+conditions for being able to DMA vary between revisions of the QUP.
+This is due to v1.1.1 using ADM DMA and later revisions using BAM.
+
+Signed-off-by: Andy Gross
+---
+ drivers/spi/spi-qup.c | 94 ++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 62 insertions(+), 32 deletions(-)
+
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -142,6 +142,7 @@ struct spi_qup {
+ 
+ 	struct spi_transfer	*xfer;
+ 	struct completion	done;
++	struct completion	dma_tx_done;
+ 	int			error;
+ 	int			w_size;	/* bytes per SPI word */
+ 	int			n_words;
+@@ -284,16 +285,16 @@ static void spi_qup_fifo_write(struct sp
+ 
+ static void spi_qup_dma_done(void *data)
+ {
+-	struct spi_qup *qup = data;
++	struct completion *done = data;
+ 
+-	complete(&qup->done);
++	complete(done);
+ }
+ 
+ static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
+ 			   enum dma_transfer_direction dir,
+-			   dma_async_tx_callback callback)
++			   dma_async_tx_callback callback,
++			   void *data)
+ {
+-	struct spi_qup *qup = spi_master_get_devdata(master);
+ 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
+ 	struct dma_async_tx_descriptor *desc;
+ 	struct scatterlist *sgl;
+@@ -312,11 +313,11 @@ static int spi_qup_prep_sg(struct spi_ma
+ 	}
+ 
+ 	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+-	if (!desc)
+-		return -EINVAL;
++	if (IS_ERR_OR_NULL(desc))
++		return desc ? PTR_ERR(desc) : -EINVAL;
+ 
+ 	desc->callback = callback;
+-	desc->callback_param = qup;
++	desc->callback_param = data;
+ 
+ 	cookie = dmaengine_submit(desc);
+ 
+@@ -332,18 +333,29 @@ static void spi_qup_dma_terminate(struct
+ 	dmaengine_terminate_all(master->dma_rx);
+ }
+ 
+-static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
++static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer,
++unsigned long timeout)
+ {
++	struct spi_qup *qup = spi_master_get_devdata(master);
+ 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
+ 	int ret;
+ 
++	/* before issuing the descriptors, set the QUP to run */
++	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
++	if (ret) {
++		dev_warn(qup->dev, "cannot set RUN state\n");
++		return ret;
++	}
++
+ 	if (xfer->rx_buf)
+ 		rx_done = spi_qup_dma_done;
+-	else if (xfer->tx_buf)
++
++	if (xfer->tx_buf)
+ 		tx_done = spi_qup_dma_done;
+ 
+ 	if (xfer->rx_buf) {
+-		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
++		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
++				      &qup->done);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -351,17 +363,25 @@ static int spi_qup_do_dma(struct spi_mas
+ 	}
+ 
+ 	if (xfer->tx_buf) {
+-		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
++		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done,
++				      &qup->dma_tx_done);
+ 		if (ret)
+ 			return ret;
+ 
+ 		dma_async_issue_pending(master->dma_tx);
+ 	}
+ 
+-	return 0;
++	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
++		return -ETIMEDOUT;
++
++	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
++		ret = -ETIMEDOUT;
++
++	return ret;
+ }
+ 
+-static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
++static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
++			  unsigned long timeout)
+ {
+ 	struct spi_qup *qup = spi_master_get_devdata(master);
+ 	int ret;
+@@ -380,6 +400,15 @@ static int spi_qup_do_pio(struct spi_mas
+ 
+ 	spi_qup_fifo_write(qup, xfer);
+ 
++	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
++	if (ret) {
++		dev_warn(qup->dev, "cannot set RUN state\n");
++		return ret;
++	}
++
++	if (!wait_for_completion_timeout(&qup->done, timeout))
++		return -ETIMEDOUT;
++
+ 	return 0;
+ }
+ 
+@@ -428,7 +457,6 @@ static irqreturn_t spi_qup_qup_irq(int i
+ 			dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ 		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ 			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+-
+ 		error = -EIO;
+ 	}
+ 
+@@ -617,6 +645,7 @@ static int spi_qup_transfer_one(struct s
+ 	timeout = 100 * msecs_to_jiffies(timeout);
+ 
+ 	reinit_completion(&controller->done);
++	reinit_completion(&controller->dma_tx_done);
+ 
+ 	spin_lock_irqsave(&controller->lock, flags);
+ 	controller->xfer = xfer;
+@@ -626,21 +655,13 @@ static int spi_qup_transfer_one(struct s
+ 	spin_unlock_irqrestore(&controller->lock, flags);
+ 
+ 	if (spi_qup_is_dma_xfer(controller->mode))
+-		ret = spi_qup_do_dma(master, xfer);
++		ret = spi_qup_do_dma(master, xfer, timeout);
+ 	else
+-		ret = spi_qup_do_pio(master, xfer);
++		ret = spi_qup_do_pio(master, xfer, timeout);
+ 
+ 	if (ret)
+ 		goto exit;
+ 
+-	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+-		dev_warn(controller->dev, "cannot set EXECUTE state\n");
+-		goto exit;
+-	}
+-
+-	if (!wait_for_completion_timeout(&controller->done, timeout))
+-		ret = -ETIMEDOUT;
+-
+ exit:
+ 	spi_qup_set_state(controller, QUP_STATE_RESET);
+ 	spin_lock_irqsave(&controller->lock, flags);
+@@ -662,15 +683,23 @@ static bool spi_qup_can_dma(struct spi_m
+ 	size_t dma_align = dma_get_cache_alignment();
+ 	int n_words;
+ 
+-	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
+-	    IS_ERR_OR_NULL(master->dma_rx) ||
+-	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
+-		return false;
++	if (xfer->rx_buf) {
++		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
++		    IS_ERR_OR_NULL(master->dma_rx))
++			return false;
+ 
+-	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
+-	    IS_ERR_OR_NULL(master->dma_tx) ||
+-	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
+-		return false;
++		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
++			return false;
++	}
++
++	if (xfer->tx_buf) {
++		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
++		    IS_ERR_OR_NULL(master->dma_tx))
++			return false;
++
++		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
++			return false;
++	}
+ 
+ 	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
+ 	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
+@@ -836,6 +865,7 @@ static int spi_qup_probe(struct platform
+ 
+ 	spin_lock_init(&controller->lock);
+ 	init_completion(&controller->done);
++	init_completion(&controller->dma_tx_done);
+ 
+ 	iomode = readl_relaxed(base + QUP_IO_M_MODES);
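
For reference, the eligibility rule the reworked spi_qup_can_dma() hunk encodes is: a buffer may use DMA only if it is cache-aligned, and only QUP v1 (ADM) additionally requires the length to be a whole number of blocks; BAM has no block-size restriction. The standalone C sketch below illustrates just that rule. It is not driver code: the field names only loosely mirror struct spi_qup, the DMA-channel validity check is omitted, and the block-size and alignment values are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* same test the kernel's IS_ALIGNED() macro performs (align is a power of 2) */
static bool is_aligned(uintptr_t addr, size_t align)
{
	return (addr & (align - 1)) == 0;
}

struct qup_caps {
	bool qup_v1;      /* v1.1.1 uses ADM DMA; later revisions use BAM */
	size_t blk_sz;    /* stands in for in_blk_sz / out_blk_sz */
	size_t dma_align; /* stands in for dma_get_cache_alignment() */
};

/* DMA is allowed only for a cache-aligned buffer; on v1 (ADM) the length
 * must also be a whole number of blocks.  BAM has no such limit. */
static bool can_dma(const struct qup_caps *c, uintptr_t buf, size_t len)
{
	if (!is_aligned(buf, c->dma_align))
		return false;
	if (c->qup_v1 && (len % c->blk_sz))
		return false;
	return true;
}

int main(void)
{
	struct qup_caps caps = { .qup_v1 = true, .blk_sz = 16, .dma_align = 32 };

	/* 24 bytes is not a multiple of the 16-byte block, so ADM (v1) must
	 * fall back to PIO; the same transfer may use DMA on BAM */
	printf("v1/ADM, len 24: %s\n", can_dma(&caps, 0x1000, 24) ? "DMA" : "PIO");
	caps.qup_v1 = false;
	printf("BAM,    len 24: %s\n", can_dma(&caps, 0x1000, 24) ? "DMA" : "PIO");
	return 0;
}

Compiled with any C99 compiler, this prints PIO for the v1 case and DMA for the BAM case, which is the fallback behaviour the patch makes revision-dependent.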