--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -29,16 +29,15 @@
 					CKCTL_6338_SAR_EN |		\
 					CKCTL_6338_SPI_EN)
 
-#define CKCTL_6345_CPU_EN		(1 << 0)
-#define CKCTL_6345_BUS_EN		(1 << 1)
-#define CKCTL_6345_EBI_EN		(1 << 2)
-#define CKCTL_6345_UART_EN		(1 << 3)
-#define CKCTL_6345_ADSLPHY_EN		(1 << 4)
-#define CKCTL_6345_ENET_EN		(1 << 7)
-#define CKCTL_6345_USBH_EN		(1 << 8)
+#define CKCTL_6345_CPU_EN		(1 << 16)
+#define CKCTL_6345_BUS_EN		(1 << 17)
+#define CKCTL_6345_EBI_EN		(1 << 18)
+#define CKCTL_6345_UART_EN		(1 << 19)
+#define CKCTL_6345_ADSLPHY_EN		(1 << 20)
+#define CKCTL_6345_ENET_EN		(1 << 23)
+#define CKCTL_6345_USBH_EN		(1 << 24)
 
-#define CKCTL_6345_ALL_SAFE_EN		(CKCTL_6345_ENET_EN |	\
-					CKCTL_6345_USBH_EN |	\
+#define CKCTL_6345_ALL_SAFE_EN		(CKCTL_6345_USBH_EN |	\
 					CKCTL_6345_ADSLPHY_EN)
 
 #define CKCTL_6348_ADSLPHY_EN		(1 << 0)
@@ -547,6 +546,39 @@
 
 #define ENETDMA_SRAM4_REG(x)		(0x20c + (x) * 0x10)
 
+/* Broadcom 6345 ENET DMA definitions */
+#define ENETDMA_6345_CHANCFG_REG(x)	(0x00 + (x) * 0x40)
+#define ENETDMA_6345_CHANCFG_EN_SHIFT	0
+#define ENETDMA_6345_CHANCFG_EN_MASK	(1 << ENETDMA_6345_CHANCFG_EN_SHIFT)
+#define ENETDMA_6345_PKTHALT_SHIFT	1
+#define ENETDMA_6345_PKTHALT_MASK	(1 << ENETDMA_6345_PKTHALT_SHIFT)
+#define ENETDMA_6345_CHAINING_SHIFT	2
+#define ENETDMA_6345_CHAINING_MASK	(1 << ENETDMA_6345_CHAINING_SHIFT)
+#define ENETDMA_6345_WRAP_EN_SHIFT	3
+#define ENETDMA_6345_WRAP_EN_MASK	(1 << ENETDMA_6345_WRAP_EN_SHIFT)
+#define ENETDMA_6345_FLOWC_EN_SHIFT	4
+#define ENETDMA_6345_FLOWC_EN_MASK	(1 << ENETDMA_6345_FLOWC_EN_SHIFT)
+
+#define ENETDMA_6345_MAXBURST_REG(x)	(0x04 + (x) * 0x40)
+
+#define ENETDMA_6345_RSTART_REG(x)	(0x08 + (x) * 0x40)
+
+#define ENETDMA_6345_LEN_REG(x)		(0x0C + (x) * 0x40)
+
+#define ENETDMA_6345_BSTAT_REG(x)	(0x10 + (x) * 0x40)
+
+#define ENETDMA_6345_IR_REG(x)		(0x14 + (x) * 0x40)
+#define ENETDMA_6345_IR_BUFDONE_MASK	(1 << 0)
+#define ENETDMA_6345_IR_PKTDONE_MASK	(1 << 1)
+#define ENETDMA_6345_IR_NOTOWNER_MASK	(1 << 2)
+
+#define ENETDMA_6345_IRMASK_REG(x)	(0x18 + (x) * 0x40)
+
+#define ENETDMA_6345_FC_REG(x)		(0x1C + (x) * 0x40)
+
+#define ENETDMA_6345_BUFALLOC_REG(x)	(0x20 + (x) * 0x40)
+
+
 /*************************************************************************
  * _REG relative to RSET_OHCI_PRIV
  *************************************************************************/
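Note (annotation, not part of the patch): the ENETDMA_6345_*_REG macros above describe a flat per-channel layout. Each DMA channel owns a 0x40-byte register window, and every macro is a fixed offset into channel x's window, unlike the shared-engine ENETDMA_* layout of the other chips. A minimal userspace sketch, illustration only rather than driver code, of how the offsets expand:

	#include <stdio.h>

	#define ENETDMA_6345_CHANCFG_REG(x)	(0x00 + (x) * 0x40)
	#define ENETDMA_6345_IR_REG(x)		(0x14 + (x) * 0x40)
	#define ENETDMA_6345_BUFALLOC_REG(x)	(0x20 + (x) * 0x40)

	int main(void)
	{
		int chan;

		/* channel n's registers all live in the window at n * 0x40 */
		for (chan = 0; chan < 3; chan++)
			printf("chan %d: chancfg=0x%02x ir=0x%02x bufalloc=0x%02x\n",
			       chan,
			       ENETDMA_6345_CHANCFG_REG(chan),
			       ENETDMA_6345_IR_REG(chan),
			       ENETDMA_6345_BUFALLOC_REG(chan));
		return 0;
	}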
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -32,6 +32,7 @@
 #include <linux/if_vlan.h>
 
 #include <bcm63xx_dev_enet.h>
+#include <bcm63xx_cpu.h>
 #include "bcm63xx_enet.h"
 
 static char bcm_enet_driver_name[] = "bcm63xx_enet";
@@ -178,6 +179,7 @@ static void bcm_enet_mdio_write_mii(stru
 static int bcm_enet_refill_rx(struct net_device *dev)
 {
 	struct bcm_enet_priv *priv;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 
@@ -206,7 +208,7 @@ static int bcm_enet_refill_rx(struct net
 		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
 		len_stat |= DMADESC_OWNER_MASK;
 		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
-			len_stat |= DMADESC_WRAP_MASK;
+			len_stat |= (DMADESC_WRAP_MASK >> desc_shift);
 			priv->rx_dirty_desc = 0;
 		} else {
 			priv->rx_dirty_desc++;
@@ -217,7 +219,10 @@ static int bcm_enet_refill_rx(struct net
 		priv->rx_desc_count++;
 
 		/* tell dma engine we allocated one buffer */
-		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		if (!BCMCPU_IS_6345())
+			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		else
+			enet_dma_writel(priv, 1, ENETDMA_6345_BUFALLOC_REG(priv->rx_chan));
 	}
 
 	/* If rx ring is still empty, set a timer to try allocating
@@ -255,6 +260,7 @@ static int bcm_enet_receive_queue(struct
 	struct bcm_enet_priv *priv;
 	struct device *kdev;
 	int processed;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 	kdev = &priv->pdev->dev;
@@ -293,7 +299,7 @@ static int bcm_enet_receive_queue(struct
 
 		/* if the packet does not have start of packet _and_
 		 * end of packet flag set, then just recycle it */
-		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+		if ((len_stat & (DMADESC_ESOP_MASK >> desc_shift)) != (DMADESC_ESOP_MASK >> desc_shift)) {
 			dev->stats.rx_dropped++;
 			continue;
 		}
@@ -353,8 +359,15 @@ static int bcm_enet_receive_queue(struct
 		bcm_enet_refill_rx(dev);
 
 		/* kick rx dma */
-		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-				ENETDMA_CHANCFG_REG(priv->rx_chan));
+		if (!BCMCPU_IS_6345())
+			enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+					ENETDMA_CHANCFG_REG(priv->rx_chan));
+		else
+			enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+					ENETDMA_6345_CHAINING_MASK |
+					ENETDMA_6345_WRAP_EN_MASK |
+					ENETDMA_6345_FLOWC_EN_MASK,
+					ENETDMA_6345_CHANCFG_REG(priv->rx_chan));
 	}
 
 	return processed;
@@ -429,10 +442,21 @@ static int bcm_enet_poll(struct napi_str
 	dev = priv->net_dev;
 
 	/* ack interrupts */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->tx_chan));
+	}
 
 	/* reclaim sent skb */
 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -451,10 +475,21 @@ static int bcm_enet_poll(struct napi_str
 	napi_complete(napi);
 
 	/* restore rx/tx interrupt */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	return rx_work_done;
 }
@@ -497,8 +532,13 @@ static irqreturn_t bcm_enet_isr_dma(int
 	priv = netdev_priv(dev);
 
 	/* mask rx/tx interrupts */
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	napi_schedule(&priv->napi);
 
@@ -514,6 +554,7 @@ static int bcm_enet_start_xmit(struct sk
 	struct bcm_enet_desc *desc;
 	u32 len_stat;
 	int ret;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 
@@ -539,14 +580,13 @@ static int bcm_enet_start_xmit(struct sk
 			       DMA_TO_DEVICE);
 
 	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
-	len_stat |= DMADESC_ESOP_MASK |
-		DMADESC_APPEND_CRC |
-		DMADESC_OWNER_MASK;
+	len_stat |= ((DMADESC_ESOP_MASK >> desc_shift) |
+		DMADESC_APPEND_CRC | DMADESC_OWNER_MASK);
 
 	priv->tx_curr_desc++;
 	if (priv->tx_curr_desc == priv->tx_ring_size) {
 		priv->tx_curr_desc = 0;
-		len_stat |= DMADESC_WRAP_MASK;
+		len_stat |= (DMADESC_WRAP_MASK >> desc_shift);
 	}
 	priv->tx_desc_count--;
 
@@ -557,8 +597,15 @@ static int bcm_enet_start_xmit(struct sk
 	wmb();
 
 	/* kick tx dma */
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+				ENETDMA_CHANCFG_REG(priv->tx_chan));
+	else
+		enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+				ENETDMA_6345_CHAINING_MASK |
+				ENETDMA_6345_WRAP_EN_MASK |
+				ENETDMA_6345_FLOWC_EN_MASK,
+				ENETDMA_6345_CHANCFG_REG(priv->tx_chan));
 
 	/* stop queue if no more desc available */
 	if (!priv->tx_desc_count)
@@ -686,6 +733,9 @@ static void bcm_enet_set_flow(struct bcm
 		val &= ~ENET_RXCFG_ENFLOW_MASK;
 	enet_writel(priv, val, ENET_RXCFG_REG);
 
+	if (BCMCPU_IS_6345())
+		return;
+
 	/* tx flow control (pause frame generation) */
 	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
 	if (tx_en)
@@ -833,8 +883,13 @@ static int bcm_enet_open(struct net_devi
 
 	/* mask all interrupts and request them */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 	if (ret)
@@ -913,8 +968,12 @@ static int bcm_enet_open(struct net_devi
 	priv->rx_curr_desc = 0;
 
 	/* initialize flow control buffer allocation */
-	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
-			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	else
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_6345_BUFALLOC_REG(priv->rx_chan));
 
 	if (bcm_enet_refill_rx(dev)) {
 		dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -923,37 +982,62 @@ static int bcm_enet_open(struct net_devi
 	}
 
 	/* write rx & tx ring addresses */
-	enet_dma_writel(priv, priv->rx_desc_dma,
-			ENETDMA_RSTART_REG(priv->rx_chan));
-	enet_dma_writel(priv, priv->tx_desc_dma,
-			ENETDMA_RSTART_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, priv->rx_desc_dma,
+				ENETDMA_RSTART_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_desc_dma,
+				ENETDMA_RSTART_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, priv->rx_desc_dma,
+				ENETDMA_6345_RSTART_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_desc_dma,
+				ENETDMA_6345_RSTART_REG(priv->tx_chan));
+	}
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->tx_chan));
+	}
 
 	/* set max rx/tx length */
 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
 
 	/* set dma maximum burst len */
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->rx_chan));
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_MAXBURST_REG(priv->rx_chan));
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_MAXBURST_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_6345_MAXBURST_REG(priv->rx_chan));
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_6345_MAXBURST_REG(priv->tx_chan));
+	}
 
 	/* set correct transmit fifo watermark */
 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
 
 	/* set flow control low/high threshold to 1/3 / 2/3 */
-	val = priv->rx_ring_size / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
-	val = (priv->rx_ring_size * 2) / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345()) {
+		val = priv->rx_ring_size / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+		val = (priv->rx_ring_size * 2) / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	} else {
+		enet_dma_writel(priv, 5, ENETDMA_6345_FC_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->rx_ring_size, ENETDMA_6345_LEN_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_ring_size, ENETDMA_6345_LEN_REG(priv->tx_chan));
+	}
 
 	/* all set, enable mac and interrupts, start dma engine and
 	 * kick rx dma channel */
@@ -961,27 +1045,58 @@ static int bcm_enet_open(struct net_devi
 	val = enet_readl(priv, ENET_CTL_REG);
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->rx_chan));
+
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+				ENETDMA_CHANCFG_REG(priv->rx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+				ENETDMA_6345_CHAINING_MASK |
+				ENETDMA_6345_WRAP_EN_MASK |
+				ENETDMA_6345_FLOWC_EN_MASK,
+				ENETDMA_6345_CHANCFG_REG(priv->rx_chan));
+	}
 
 	/* watch "mib counters about to overflow" interrupt */
 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
 	/* watch "packet transferred" interrupt in rx and tx */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->tx_chan));
+	}
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	if (priv->has_phy)
 		phy_start(priv->phydev);
@@ -1061,13 +1176,19 @@ static void bcm_enet_disable_dma(struct
 {
 	int limit;
 
-	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+	else
+		enet_dma_writel(priv, 0, ENETDMA_6345_CHANCFG_REG(chan));
 
 	limit = 1000;
 	do {
 		u32 val;
 
-		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
+		if (!BCMCPU_IS_6345())
+			val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
+		else
+			val = enet_dma_readl(priv, ENETDMA_6345_CHANCFG_REG(chan));
 		if (!(val & ENETDMA_CHANCFG_EN_MASK))
 			break;
 		udelay(1);
@@ -1094,8 +1215,13 @@ static int bcm_enet_stop(struct net_devi
 
 	/* mask all interrupts */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	/* make sure no mib update is scheduled */
 	cancel_work_sync(&priv->mib_update_task);
@@ -1622,6 +1748,7 @@ static int __devinit bcm_enet_probe(stru
 	const char *clk_name;
 	unsigned int iomem_size;
 	int i, ret;
+	unsigned int chan_offset = 0;
 
 	/* stop if shared driver failed, assume driver->probe will be
 	 * called in the same order we register devices (correct ?) */
@@ -1661,10 +1788,13 @@ static int __devinit bcm_enet_probe(stru
 	priv->irq_tx = res_irq_tx->start;
 	priv->mac_id = pdev->id;
 
+	if (BCMCPU_IS_6345())
+		chan_offset = 1;
+
 	/* get rx & tx dma channel id for this mac */
 	if (priv->mac_id == 0) {
-		priv->rx_chan = 0;
-		priv->tx_chan = 1;
+		priv->rx_chan = 0 + chan_offset;
+		priv->tx_chan = 1 + chan_offset;
 		clk_name = "enet0";
	} else {
		priv->rx_chan = 2;
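Note (annotation, not part of the patch): the desc_shift pattern used throughout the driver changes above relies on DMADESC_6345_SHIFT, added to bcm63xx_enet.h below. On the 6345 the EOP/SOP/WRAP descriptor flags sit three bit positions lower than on the other chips, so shifting the common masks right at run time avoids duplicating every flag definition. A standalone sketch of the remap follows; the EOP/SOP bit positions are assumptions for the demo, since only DMADESC_WRAP_MASK and the composition of DMADESC_ESOP_MASK appear in this patch:

	#include <stdio.h>

	#define DMADESC_EOP_MASK	(1 << 14)	/* assumed position */
	#define DMADESC_SOP_MASK	(1 << 13)	/* assumed position */
	#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
	#define DMADESC_WRAP_MASK	(1 << 12)	/* from bcm63xx_enet.h */
	#define DMADESC_6345_SHIFT	(3)

	int main(void)
	{
		/* in the driver: BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0 */
		unsigned int desc_shift = DMADESC_6345_SHIFT;

		printf("wrap: 0x%04x -> 0x%04x\n",
		       DMADESC_WRAP_MASK, DMADESC_WRAP_MASK >> desc_shift);
		printf("esop: 0x%04x -> 0x%04x\n",
		       DMADESC_ESOP_MASK, DMADESC_ESOP_MASK >> desc_shift);
		return 0;
	}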
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -46,6 +46,9 @@ struct bcm_enet_desc {
 #define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
 #define DMADESC_WRAP_MASK	(1 << 12)
 
+/* Shift down for EOP, SOP and WRAP bits */
+#define DMADESC_6345_SHIFT	(3)
+
 #define DMADESC_UNDER_MASK	(1 << 9)
 #define DMADESC_APPEND_CRC	(1 << 8)
 #define DMADESC_OVSIZE_MASK	(1 << 4)
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -104,7 +104,7 @@ int __init bcm63xx_enet_register(int uni
 	if (unit > 1)
 		return -ENODEV;
 
-	if (unit == 1 && BCMCPU_IS_6338())
+	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
 		return -ENODEV;
 
 	if (!shared_device_registered) {
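Note (annotation, not part of the patch): taken together, the probe change in bcm63xx_enet.c and the registration check in dev-enet.c mean a 6345 exposes a single MAC whose DMA channel pair is shifted up by one (rx_chan = 1, tx_chan = 2), while unit 1 is rejected just as on the 6338. A tiny sketch of the resulting assignment for MAC 0, with is_6345 standing in for BCMCPU_IS_6345():

	#include <stdbool.h>
	#include <stdio.h>

	/* mirrors the chan_offset logic added to bcm_enet_probe() for MAC 0 */
	static void pick_mac0_channels(bool is_6345, int *rx_chan, int *tx_chan)
	{
		int chan_offset = is_6345 ? 1 : 0;

		*rx_chan = 0 + chan_offset;
		*tx_chan = 1 + chan_offset;
	}

	int main(void)
	{
		int rx, tx;

		pick_mac0_channels(true, &rx, &tx);
		printf("6345:  rx_chan=%d tx_chan=%d\n", rx, tx);
		pick_mac0_channels(false, &rx, &tx);
		printf("other: rx_chan=%d tx_chan=%d\n", rx, tx);
		return 0;
	}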