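ar231x: pass received packets to the PHY driver

Let a PHY (switch) driver take over the receive path: cache the
driver's netif_rx hook in ar231x_private and call it in place of
netif_rx() (the hook is now responsible for eth_type_trans()), and
honor the driver's pkt_align requirement by reserving that much
extra headroom on top of RX_OFFSET when rx buffers are first loaded
and when they are refilled after a received frame.

For illustration only (not part of the patch below): a PHY driver
could wire up the two hooks roughly as in the following minimal
sketch. It assumes the companion phy.h change that adds the netif_rx
and pkt_align fields to struct phy_device; the driver name, tag
length and probe wiring here are hypothetical.

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>
	#include <linux/skbuff.h>

	/* hypothetical 2-byte tag prepended by the switch hardware */
	#define MYSW_TAG_LEN	2

	/* rx hook: strip the tag, then hand the frame to the stack */
	static int mysw_netif_rx(struct sk_buff *skb)
	{
		skb_pull(skb, MYSW_TAG_LEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		return netif_rx(skb);
	}

	static int mysw_probe(struct phy_device *pdev)
	{
		pdev->netif_rx = mysw_netif_rx;
		/* extra headroom the MAC reserves so the payload stays
		 * aligned once the tag has been pulled */
		pdev->pkt_align = MYSW_TAG_LEN;
		return 0;
	}
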
--- a/drivers/net/ethernet/ar231x/ar231x.c
+++ b/drivers/net/ethernet/ar231x/ar231x.c
@@ -745,6 +745,7 @@ static void ar231x_load_rx_ring(struct n
 	for (i = 0; i < nr_bufs; i++) {
 		struct sk_buff *skb;
 		ar231x_descr_t *rd;
+		int offset = RX_OFFSET;
 
 		if (sp->rx_skb[idx])
 			break;
@@ -760,7 +761,9 @@ static void ar231x_load_rx_ring(struct n
 		 * Make sure IP header starts on a fresh cache line.
 		 */
 		skb->dev = dev;
-		skb_reserve(skb, RX_OFFSET);
+		if (sp->phy_dev)
+			offset += sp->phy_dev->pkt_align;
+		skb_reserve(skb, offset);
 		sp->rx_skb[idx] = skb;
 
 		rd = (ar231x_descr_t *) & sp->rx_ring[idx];
@@ -834,20 +837,23 @@ static int ar231x_rx_int(struct net_devi
 			/* alloc new buffer. */
 			skb_new = netdev_alloc_skb(dev, AR2313_BUFSIZE + RX_OFFSET);
 			if (skb_new != NULL) {
+				int offset;
 
 				skb = sp->rx_skb[idx];
 				/* set skb */
 				skb_put(skb,
 						((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);
-
 				dev->stats.rx_bytes += skb->len;
-				skb->protocol = eth_type_trans(skb, dev);
-				/* pass the packet to upper layers */
-				netif_rx(skb);
+				/* pass the packet to upper layers */
+				sp->rx(skb);
 
 				skb_new->dev = dev;
+
 				/* 16 bit align */
-				skb_reserve(skb_new, RX_OFFSET);
+				offset = RX_OFFSET;
+				if (sp->phy_dev)
+					offset += sp->phy_dev->pkt_align;
+				skb_reserve(skb_new, offset);
 				/* reset descriptor's curr_addr */
 				rxdesc->addr = virt_to_phys(skb_new->data);
 
@@ -1258,6 +1264,8 @@ static int ar231x_mdiobus_probe (struct
 		return PTR_ERR(phydev);
 	}
 
+	sp->rx = phydev->netif_rx;
+
 	/* mask with MAC supported features */
 	phydev->supported &= (SUPPORTED_10baseT_Half
 		| SUPPORTED_10baseT_Full
--- a/drivers/net/ethernet/ar231x/ar231x.h
+++ b/drivers/net/ethernet/ar231x/ar231x.h
@@ -222,6 +222,8 @@ typedef struct {
  */
 struct ar231x_private {
 	struct net_device *dev;
+	int (*rx)(struct sk_buff *skb);
+
 	int version;
 	u32 mb[2];
 