path: root/target/linux/ar7/patches-2.6.25/160-cpmac-rx-ring-use-eoq.diff
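
Track and recover from RX end-of-queue (EOQ) conditions in the cpmac
driver. The DMA engine stops receiving when it follows a hw_next of
zero and flags the descriptor where it stopped with EOQ. This patch:

- adds a prev pointer to struct cpmac_desc so the ring can be
  re-chained cheaply from either end;
- makes cpmac_poll() detect where the receiver stalled, re-link freed
  descriptors onto the hardware list, and restart reception by writing
  the first free descriptor to CPMAC_RX_PTR(0);
- falls back to a full hardware reset via reset_work when the ring
  state is inconsistent (a duplicate EOQ, or a restart point still
  owned by the hardware).
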
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -187,6 +187,7 @@
 #define CPMAC_EOQ			0x1000
 	struct sk_buff *skb;
 	struct cpmac_desc *next;
+	struct cpmac_desc *prev;
 	dma_addr_t mapping;
 	dma_addr_t data_mapping;
 };
@@ -242,6 +243,16 @@
 	printk("\n");
 }
 
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct cpmac_desc *dump = priv->rx_head;
+	do {
+		cpmac_dump_desc(dev, dump);
+		dump = dump->next;
+	} while (dump != priv->rx_head);
+}
+
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int i;
@@ -413,21 +424,40 @@
 static int cpmac_poll(struct napi_struct *napi, int budget)
 {
 	struct sk_buff *skb;
-	struct cpmac_desc *desc;
-	int received = 0;
+	struct cpmac_desc *desc, *restart;
 	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+	int received = 0, processed = 0;
 
 	spin_lock(&priv->rx_lock);
 	if (unlikely(!priv->rx_head)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
 			       priv->dev->name);
+		spin_unlock(&priv->rx_lock);
 		netif_rx_complete(priv->dev, napi);
 		return 0;
 	}
 
 	desc = priv->rx_head;
+	restart = NULL;
 	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+		processed++;
+
+		if ((desc->dataflags & CPMAC_EOQ) != 0) {
+			/* The last update to eoq->hw_next didn't happen soon enough,
+			 * and the receiver stopped here. Remember this descriptor
+			 * so we can restart the receiver after freeing some space.
+			 */
+			if (unlikely(restart)) {
+				if (netif_msg_rx_err(priv))
+					printk(KERN_ERR "%s: poll found a duplicate EOQ: %p and %p\n",
+						priv->dev->name, restart, desc);
+				goto fatal_error;
+			}
+
+			restart = desc->next;
+		}
+
 		skb = cpmac_rx_one(priv, desc);
 		if (likely(skb)) {
 			netif_receive_skb(skb);
@@ -436,19 +466,81 @@
 		desc = desc->next;
 	}
 
+	if (desc != priv->rx_head) {
+		/* We freed some buffers, but not the whole ring; re-chain what we freed onto the hardware rx list. */
+		desc->prev->hw_next = (u32)0;
+		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+	}
+
+	/* Optimization: if we did not actually process an EOQ (perhaps due
+	 * to quota limits), check whether the tail of the queue has EOQ
+	 * set. If so, restart the receiver immediately so it can run in
+	 * parallel with further packet processing. This lets us absorb
+	 * slightly larger bursts before running out of ring space
+	 * (assuming dev->weight < ring_size). */
+	if (!restart &&
+	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) == CPMAC_EOQ &&
+	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+		/* Reset EOQ so the poll loop above doesn't try to restart
+		 * again when it eventually reaches this descriptor.
+		 */
+		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+		restart = priv->rx_head;
+	}
+
+	if (restart) {
+		priv->dev->stats.rx_errors++;
+		priv->dev->stats.rx_fifo_errors++;
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			printk(KERN_WARNING "%s: rx dma ring overrun\n", priv->dev->name);
+
+		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+			if (netif_msg_drv(priv))
+				printk(KERN_ERR "%s: cpmac_poll is trying to restart rx from a descriptor that's not free: %p\n",
+					priv->dev->name, restart);
+			goto fatal_error;
+		}
+
+		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+	}
+
 	priv->rx_head = desc;
 	spin_unlock(&priv->rx_lock);
 	if (unlikely(netif_msg_rx_status(priv)))
 		printk(KERN_DEBUG "%s: poll processed %d packets\n",
 		       priv->dev->name, received);
-	if (desc->dataflags & CPMAC_OWN) {
+	if (processed == 0) {
+		/* We ran out of packets to read; revert to interrupt-driven mode. */
 		netif_rx_complete(priv->dev, napi);
-		cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
 		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
 		return 0;
 	}
 
 	return 1;
+
+fatal_error:
+	/* Something went horribly wrong. Reset hardware to try to recover rather than wedging. */
+
+	if (netif_msg_drv(priv)) {
+		printk(KERN_ERR "%s: cpmac_poll is confused. Resetting hardware\n", priv->dev->name);
+		cpmac_dump_all_desc(priv->dev);
+		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+			priv->dev->name,
+			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+	}
+
+	spin_unlock(&priv->rx_lock);
+	netif_rx_complete(priv->dev, napi);
+	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
+
+	atomic_inc(&priv->reset_pending);
+	cpmac_hw_stop(priv->dev);
+	if (!schedule_work(&priv->reset_work))
+		atomic_dec(&priv->reset_pending);
+	return 0;
+
 }
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -625,8 +717,10 @@
 			desc->dataflags = CPMAC_OWN;
 			dev->stats.rx_dropped++;
 		}
+		desc->hw_next = desc->next->mapping;
 		desc = desc->next;
 	}
+	priv->rx_head->prev->hw_next = 0;
 }
 
 static void cpmac_clear_tx(struct net_device *dev)
@@ -928,9 +1022,12 @@
 		desc->buflen = CPMAC_SKB_SIZE;
 		desc->dataflags = CPMAC_OWN;
 		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+		desc->next->prev = desc;
 		desc->hw_next = (u32)desc->next->mapping;
 	}
 
+	priv->rx_head->prev->hw_next = (u32)0;
+
 	if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
 			       dev->name, dev))) {
 		if (netif_msg_drv(priv))
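
For reference, here is a minimal, self-contained C sketch of the
ring-walk and restart logic that the cpmac_poll() hunks above
implement, with the driver and DMA-mapping details stripped out. The
names (struct desc, OWN, EOQ, rx_ptr_write, ring_init, poll_ring) are
illustrative stand-ins, not the driver's API, and the error paths are
omitted.

#include <stdint.h>
#include <stddef.h>

#define OWN 0x8000 /* descriptor is currently owned by the hardware */
#define EOQ 0x1000 /* hardware stopped (end of queue) at this descriptor */

struct desc {
	uint32_t dataflags;
	uint32_t hw_next;  /* physical address of the next descriptor; 0 = stop */
	uint32_t mapping;  /* this descriptor's own physical address */
	struct desc *next; /* software circular links */
	struct desc *prev;
};

/* Stand-in for cpmac_write(regs, CPMAC_RX_PTR(0), phys). */
static void rx_ptr_write(uint32_t phys) { (void)phys; }

/* Link the descriptors into a circular ring and terminate the hardware
 * chain at the tail, mirroring the ring-setup hunk of the patch. */
static void ring_init(struct desc *ring, size_t size)
{
	for (size_t i = 0; i < size; i++) {
		ring[i].dataflags = OWN;
		ring[i].next = &ring[(i + 1) % size];
		ring[i].next->prev = &ring[i];
		ring[i].hw_next = ring[i].next->mapping;
	}
	ring[0].prev->hw_next = 0; /* hardware stops after the last slot */
}

/* Consume completed descriptors starting at *head. If the hardware
 * flagged EOQ inside the consumed span, it has stalled: re-chain the
 * freed descriptors onto the hardware list and restart the receiver
 * from the first slot after the stall point. */
static void poll_ring(struct desc **head, int budget)
{
	struct desc *d = *head;
	struct desc *restart = NULL;
	int received = 0;

	while (!(d->dataflags & OWN) && received < budget) {
		if (d->dataflags & EOQ)
			restart = d->next;  /* the receiver stalled here */
		/* ... pass the packet up and attach a fresh buffer ... */
		d->dataflags = OWN;         /* hand the slot back to hardware */
		received++;
		d = d->next;
	}

	if (d != *head) {
		/* Append the freed span (old head .. d->prev) to the
		 * hardware list: terminate it, then link the old ring
		 * tail to the old head. */
		d->prev->hw_next = 0;
		(*head)->prev->hw_next = (*head)->mapping;
	}
	*head = d;

	if (restart)
		rx_ptr_write(restart->mapping); /* kick the receiver again */
}

The prev pointer is what makes this cheap: without it, finding the last
freed descriptor and the ring tail for the two hw_next updates would
each require walking the ring.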