Diffstat (limited to 'target/linux/mediatek/patches-4.4/0091-net-next-mediatek-WIP.patch')
-rw-r--r-- | target/linux/mediatek/patches-4.4/0091-net-next-mediatek-WIP.patch | 249
1 file changed, 249 insertions, 0 deletions
diff --git a/target/linux/mediatek/patches-4.4/0091-net-next-mediatek-WIP.patch b/target/linux/mediatek/patches-4.4/0091-net-next-mediatek-WIP.patch
new file mode 100644
index 0000000..b852e67
--- /dev/null
+++ b/target/linux/mediatek/patches-4.4/0091-net-next-mediatek-WIP.patch
@@ -0,0 +1,249 @@
+From 34e10b96d5ccb99fb78251051bc5652b09359983 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Thu, 28 Apr 2016 07:58:22 +0200
+Subject: [PATCH 91/91] net-next: mediatek WIP
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c |   89 ++++++++++++---------------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h |    5 +-
+ 2 files changed, 44 insertions(+), 50 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 5d33053..2e05920 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -326,7 +326,7 @@ static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
+ 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ 	/* flush write */
+-	mtk_r32(eth, MTK_QDMA_INT_MASK);
++//	mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 	spin_unlock_irqrestore(&eth->irq_lock, flags);
+ }
+ 
+@@ -339,7 +339,7 @@ static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
+ 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ 	/* flush write */
+-	mtk_r32(eth, MTK_QDMA_INT_MASK);
++//	mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 	spin_unlock_irqrestore(&eth->irq_lock, flags);
+ }
+ 
+@@ -710,10 +710,26 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
+ 	return nfrags;
+ }
+ 
++static int mtk_queue_stopped(struct mtk_eth *eth)
++{
++	int i;
++
++	for (i = 0; i < MTK_MAC_COUNT; i++) {
++		if (!eth->netdev[i])
++			continue;
++		if (netif_queue_stopped(eth->netdev[i]))
++			return 1;
++	}
++
++	return 0;
++}
++
+ static void mtk_wake_queue(struct mtk_eth *eth)
+ {
+ 	int i;
+ 
++	printk("%s:%s[%d]w\n", __FILE__, __func__, __LINE__);
++
+ 	for (i = 0; i < MTK_MAC_COUNT; i++) {
+ 		if (!eth->netdev[i])
+ 			continue;
+@@ -725,6 +741,7 @@ static void mtk_stop_queue(struct mtk_eth *eth)
+ {
+ 	int i;
+ 
++	printk("%s:%s[%d]s\n", __FILE__, __func__, __LINE__);
+ 	for (i = 0; i < MTK_MAC_COUNT; i++) {
+ 		if (!eth->netdev[i])
+ 			continue;
+@@ -775,12 +792,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
+ 		goto drop;
+ 
+-	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
++	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+ 		mtk_stop_queue(eth);
+-		if (unlikely(atomic_read(&ring->free_count) >
+-			     ring->thresh))
+-			mtk_wake_queue(eth);
+-	}
++
+ 	spin_unlock_irqrestore(&eth->page_lock, flags);
+ 
+ 	return NETDEV_TX_OK;
+@@ -927,7 +941,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+ 		}
+ 		mtk_tx_unmap(eth->dev, tx_buf);
+ 
+-		ring->last_free->txd2 = next_cpu;
+ 		ring->last_free = desc;
+ 		atomic_inc(&ring->free_count);
+ 
+@@ -945,11 +958,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+ 		netdev_completed_queue(eth->netdev[i], done, bytes);
+ 	}
+ 
+-	/* read hw index again make sure no new tx packet */
+-	if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
+-		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+-
+-	if (atomic_read(&ring->free_count) > ring->thresh)
++	if (mtk_queue_stopped(eth) &&
++	    (atomic_read(&ring->free_count) > ring->thresh))
+ 		mtk_wake_queue(eth);
+ 
+ 	return done;
+@@ -973,10 +983,11 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
+ 	int tx_done = 0;
+ 
+ 	mtk_handle_status_irq(eth);
+-
+-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ 	tx_done = mtk_poll_tx(eth, budget);
++
+ 	if (unlikely(netif_msg_intr(eth))) {
++		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 		dev_info(eth->dev,
+ 			 "done tx %d, intr 0x%08x/0x%x\n",
+@@ -1002,9 +1013,12 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
+ 	u32 status, mask;
+ 	int rx_done = 0;
+ 
+-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
++	mtk_handle_status_irq(eth);
++	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+ 	rx_done = mtk_poll_rx(napi, budget, eth);
++
+ 	if (unlikely(netif_msg_intr(eth))) {
++		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 		dev_info(eth->dev,
+ 			 "done rx %d, intr 0x%08x/0x%x\n",
+@@ -1052,9 +1066,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
+ 
+ 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+ 	ring->next_free = &ring->dma[0];
+-	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
+-	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
+-			   MAX_SKB_FRAGS);
++	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
++	ring->thresh = MAX_SKB_FRAGS;
+ 
+ 	/* make sure that all changes to the dma ring are flushed before we
+ 	 * continue
+@@ -1259,21 +1272,11 @@ static void mtk_tx_timeout(struct net_device *dev)
+ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+ {
+ 	struct mtk_eth *eth = _eth;
+-	u32 status;
+-
+-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+-	status &= ~MTK_TX_DONE_INT;
+-
+-	if (unlikely(!status))
+-		return IRQ_NONE;
+ 
+-	if (status & MTK_RX_DONE_INT) {
+-		if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-			mtk_irq_disable(eth, MTK_RX_DONE_INT);
+-			__napi_schedule(&eth->rx_napi);
+-		}
++	if (likely(napi_schedule_prep(&eth->rx_napi))) {
++		__napi_schedule(&eth->rx_napi);
++		mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ 	}
+-	mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1281,21 +1284,11 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+ {
+ 	struct mtk_eth *eth = _eth;
+-	u32 status;
+-
+-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+-	status &= ~MTK_RX_DONE_INT;
+-
+-	if (unlikely(!status))
+-		return IRQ_NONE;
+ 
+-	if (status & MTK_TX_DONE_INT) {
+-		if (likely(napi_schedule_prep(&eth->tx_napi))) {
+-			mtk_irq_disable(eth, MTK_TX_DONE_INT);
+-			__napi_schedule(&eth->tx_napi);
+-		}
++	if (likely(napi_schedule_prep(&eth->tx_napi))) {
++		__napi_schedule(&eth->tx_napi);
++		mtk_irq_disable(eth, MTK_TX_DONE_INT);
+ 	}
+-	mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1326,7 +1319,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
+ 	mtk_w32(eth,
+ 		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
+ 		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
+-		MTK_RX_BT_32DWORDS,
++		MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
+ 		MTK_QDMA_GLO_CFG);
+ 
+ 	return 0;
+@@ -1440,7 +1433,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
+ 
+ 	/* disable delay and normal interrupt */
+ 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
+-	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
++	mtk_irq_disable(eth, ~0);
+ 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ 	mtk_w32(eth, 0, MTK_RST_GL);
+ 
+@@ -1765,7 +1758,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+ 
+ 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+-	eth->netdev[id]->watchdog_timeo = HZ;
++	eth->netdev[id]->watchdog_timeo = 4 * HZ;
+ 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
+ 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
+ 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 5093518..6b22445 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -18,9 +18,9 @@
+ #define MTK_QDMA_PAGE_SIZE	2048
+ #define MTK_MAX_RX_LENGTH	1536
+ #define MTK_TX_DMA_BUF_LEN	0x3fff
+-#define MTK_DMA_SIZE		256
+-#define MTK_NAPI_WEIGHT		64
+ #define MTK_MAC_COUNT		2
++#define MTK_DMA_SIZE		(256 * MTK_MAC_COUNT)
++#define MTK_NAPI_WEIGHT		(64 * MTK_MAC_COUNT)
+ #define MTK_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+ #define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+ #define MTK_DMA_DUMMY_DESC	0xffffffff
+@@ -95,6 +95,7 @@
+ #define MTK_QDMA_GLO_CFG	0x1A04
+ #define MTK_RX_2B_OFFSET	BIT(31)
+ #define MTK_RX_BT_32DWORDS	(3 << 11)
++#define MTK_NDP_CO_PRO		BIT(10)
+ #define MTK_TX_WB_DDONE		BIT(6)
+ #define MTK_DMA_SIZE_16DWORDS	(2 << 4)
+ #define MTK_RX_DMA_BUSY		BIT(3)
+-- 
+1.7.10.4
+