author	John Crispin <john@phrozen.org>	2017-08-18 18:11:52 +0200
committer	John Crispin <john@phrozen.org>	2017-08-18 18:41:41 +0200
commit	1f068588efddf0175e954ffc07ec8478bddd52c7
tree	3e65bb1b6d076cac2597db4bcbf71a9fedee1099 /target/linux/mediatek/patches-4.9/0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch
parent	364befeccf01c07049b492d90e98c2c13457c7c3
mediatek: update to latest kernel patchset from v4.13-rc
Signed-off-by: Muciri Gatimu <muciri@openmesh.com>
Signed-off-by: Shashidhar Lakkavalli <shashidhar.lakkavalli@openmesh.com>
Signed-off-by: John Crispin <john@phrozen.org>
Diffstat (limited to 'target/linux/mediatek/patches-4.9/0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch')
-rw-r--r--	target/linux/mediatek/patches-4.9/0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch	208
1 file changed, 208 insertions(+), 0 deletions(-)
diff --git a/target/linux/mediatek/patches-4.9/0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch b/target/linux/mediatek/patches-4.9/0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch
new file mode 100644
index 0000000..27a78a6
--- /dev/null
+++ b/target/linux/mediatek/patches-4.9/0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch
@@ -0,0 +1,208 @@
+From 5afceece38fa30e3c71e7ed9ac62aa70ba8cfbb1 Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Fri, 16 Jun 2017 10:00:30 +0200
+Subject: [PATCH 47/57] net-next: mediatek: split IRQ register locking into TX
+ and RX
+
+Originally the driver only utilized the new QDMA engine. The current code
+still assumes this is the case when locking the IRQ mask register. Since
+RX now runs on the old style PDMA engine we can add a second lock. This
+patch reduces the IRQ latency as the TX and RX path no longer need to wait
+on each other under heavy load.
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 79 ++++++++++++++++++-----------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h |  5 +-
+ 2 files changed, 54 insertions(+), 30 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -372,28 +372,48 @@ static void mtk_mdio_cleanup(struct mtk_
+ 	mdiobus_unregister(eth->mii_bus);
+ }
+
+-static inline void mtk_irq_disable(struct mtk_eth *eth,
+-				   unsigned reg, u32 mask)
++static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+ {
+ 	unsigned long flags;
+ 	u32 val;
+
+-	spin_lock_irqsave(&eth->irq_lock, flags);
+-	val = mtk_r32(eth, reg);
+-	mtk_w32(eth, val & ~mask, reg);
+-	spin_unlock_irqrestore(&eth->irq_lock, flags);
++	spin_lock_irqsave(&eth->tx_irq_lock, flags);
++	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
++	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
++	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+ }
+
+-static inline void mtk_irq_enable(struct mtk_eth *eth,
+-				  unsigned reg, u32 mask)
++static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
+ {
+ 	unsigned long flags;
+ 	u32 val;
+
+-	spin_lock_irqsave(&eth->irq_lock, flags);
+-	val = mtk_r32(eth, reg);
+-	mtk_w32(eth, val | mask, reg);
+-	spin_unlock_irqrestore(&eth->irq_lock, flags);
++	spin_lock_irqsave(&eth->tx_irq_lock, flags);
++	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
++	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
++	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
++}
++
++static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
++{
++	unsigned long flags;
++	u32 val;
++
++	spin_lock_irqsave(&eth->rx_irq_lock, flags);
++	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
++	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
++	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
++}
++
++static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
++{
++	unsigned long flags;
++	u32 val;
++
++	spin_lock_irqsave(&eth->rx_irq_lock, flags);
++	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
++	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
++	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+ }
+
+ static int mtk_set_mac_address(struct net_device *dev, void *p)
+@@ -1116,7 +1136,7 @@ static int mtk_napi_tx(struct napi_struc
+ 		return budget;
+
+ 	napi_complete(napi);
+-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
++	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+
+ 	return tx_done;
+ }
+@@ -1150,7 +1170,7 @@ poll_again:
+ 		goto poll_again;
+ 	}
+ 	napi_complete(napi);
+-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
++	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+
+ 	return rx_done + budget - remain_budget;
+ }
+@@ -1699,7 +1719,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+
+ 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ 		__napi_schedule(&eth->rx_napi);
+-		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
++		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ 	}
+
+ 	return IRQ_HANDLED;
+@@ -1711,7 +1731,7 @@ static irqreturn_t mtk_handle_irq_tx(int
+
+ 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ 		__napi_schedule(&eth->tx_napi);
+-		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
++		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ 	}
+
+ 	return IRQ_HANDLED;
+@@ -1723,11 +1743,11 @@ static void mtk_poll_controller(struct n
+ 	struct mtk_mac *mac = netdev_priv(dev);
+ 	struct mtk_eth *eth = mac->hw;
+
+-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
++	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
++	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ 	mtk_handle_irq_rx(eth->irq[2], dev);
+-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
++	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
++	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ }
+ #endif
+
+@@ -1770,8 +1790,8 @@ static int mtk_open(struct net_device *d
+
+ 		napi_enable(&eth->tx_napi);
+ 		napi_enable(&eth->rx_napi);
+-		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+-		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
++		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
++		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ 	}
+ 	atomic_inc(&eth->dma_refcnt);
+
+@@ -1816,8 +1836,8 @@ static int mtk_stop(struct net_device *d
+ 	if (!atomic_dec_and_test(&eth->dma_refcnt))
+ 		return 0;
+
+-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
++	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
++	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ 	napi_disable(&eth->tx_napi);
+ 	napi_disable(&eth->rx_napi);
+
+@@ -1911,8 +1931,8 @@ static int mtk_hw_init(struct mtk_eth *e
+ 	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
+ 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
+ #endif
+-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
+-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
++	mtk_tx_irq_disable(eth, ~0);
++	mtk_rx_irq_disable(eth, ~0);
+ 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ 	mtk_w32(eth, 0, MTK_RST_GL);
+
+@@ -1983,8 +2003,8 @@ static void mtk_uninit(struct net_device
+ 	phy_disconnect(dev->phydev);
+ 	if (of_phy_is_fixed_link(mac->of_node))
+ 		of_phy_deregister_fixed_link(mac->of_node);
+-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
+-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
++	mtk_tx_irq_disable(eth, ~0);
++	mtk_rx_irq_disable(eth, ~0);
+ }
+
+ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+@@ -2442,7 +2462,8 @@ static int mtk_probe(struct platform_dev
+ 		return PTR_ERR(eth->base);
+
+ 	spin_lock_init(&eth->page_lock);
+-	spin_lock_init(&eth->irq_lock);
++	spin_lock_init(&eth->tx_irq_lock);
++	spin_lock_init(&eth->rx_irq_lock);
+
+ 	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ 						      "mediatek,ethsys");
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -526,6 +526,8 @@ struct mtk_rx_ring {
+  * @dev:		The device pointer
+  * @base:		The mapped register i/o base
+  * @page_lock:		Make sure that register operations are atomic
++ * @tx_irq__lock:	Make sure that IRQ register operations are atomic
++ * @rx_irq__lock:	Make sure that IRQ register operations are atomic
+  * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
+  *			dummy for NAPI to work
+  * @netdev:		The netdev instances
+@@ -555,7 +557,8 @@ struct mtk_eth {
+ 	struct device			*dev;
+ 	void __iomem			*base;
+ 	spinlock_t			page_lock;
+-	spinlock_t			irq_lock;
++	spinlock_t			tx_irq_lock;
++	spinlock_t			rx_irq_lock;
+ 	struct net_device		dummy_dev;
+ 	struct net_device		*netdev[MTK_MAX_DEVS];
+ 	struct mtk_mac			*mac[MTK_MAX_DEVS];
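
Editor's note: the pattern the patch applies is generic. Each interrupt-mask update is a read-modify-write that only has to be serialized against other accesses to the same register, so one lock per register is enough, and TX (QDMA) and RX (PDMA) stop contending. Below is a minimal userspace sketch of that idea, not the driver itself: pthread mutexes stand in for spin_lock_irqsave, plain words stand in for the MMIO mask registers, and the helper names merely echo the driver's mtk_tx_irq_enable/mtk_rx_irq_disable.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* One mask word per DMA engine, standing in for the
 * MTK_QDMA_INT_MASK / MTK_PDMA_INT_MASK MMIO registers. */
static uint32_t qdma_int_mask; /* TX path (QDMA engine) */
static uint32_t pdma_int_mask; /* RX path (PDMA engine) */

/* Before the patch a single lock guarded both registers, so a TX
 * mask update stalled behind an RX one. One lock per register
 * removes that cross-path contention. */
static pthread_mutex_t tx_irq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rx_irq_lock = PTHREAD_MUTEX_INITIALIZER;

static void tx_irq_enable(uint32_t mask)
{
	pthread_mutex_lock(&tx_irq_lock);
	qdma_int_mask |= mask;		/* read-modify-write under the TX lock */
	pthread_mutex_unlock(&tx_irq_lock);
}

static void rx_irq_disable(uint32_t mask)
{
	pthread_mutex_lock(&rx_irq_lock);
	pdma_int_mask &= ~mask;		/* read-modify-write under the RX lock */
	pthread_mutex_unlock(&rx_irq_lock);
}

int main(void)
{
	/* The bit positions are arbitrary placeholders for
	 * MTK_TX_DONE_INT / MTK_RX_DONE_INT. */
	tx_irq_enable(1u << 0);
	rx_irq_disable(1u << 16);
	printf("qdma=%08x pdma=%08x\n",
	       (unsigned)qdma_int_mask, (unsigned)pdma_int_mask);
	return 0;
}

The split is safe here because no caller needs a consistent view across both registers within one critical section; even mtk_poll_controller in the patch simply toggles the TX and RX masks one after the other.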