/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations (the compiler can reduce "x % 512" to "x & 511").
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow the Ethernet header plus 8 bytes for the VLAN tag and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};
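
/*
 * How ring entries are used (inferred from this driver, not from a
 * hardware manual): software fills in s.addr and s.len and hands the
 * entry to the MIX engine by writing it into the ring and then
 * writing the IRING2/ORING2 "bell" register.  On RX completion the
 * hardware reports the received length in s.len and a status in
 * s.code: RING_ENTRY_CODE_DONE for a complete packet, or
 * RING_ENTRY_CODE_MORE when one packet spans several buffers.
 */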

struct octeon_mgmt {
	struct net_device *netdev;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

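/*
 * Never fill a ring completely; leave at least 8 unused slots.  The
 * exact margin the hardware needs is not spelled out here, so treat
 * the value 8 as this driver's conservative choice rather than a
 * documented requirement.
 */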
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
	}
}

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	int port = p->port;
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

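/*
 * TX cleanup runs in a tasklet rather than in the interrupt handler:
 * octeon_mgmt_interrupt() disables the TX threshold interrupt and
 * schedules this tasklet, which reclaims completed buffers and then
 * re-enables the interrupt.
 */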
static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
	bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
	s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

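	/*
	 * Note: the unmap length below is not the length that was
	 * passed to dma_map_single() in octeon_mgmt_rx_fill_ring().
	 * This appears to be harmless only because, as the comment in
	 * octeon_mgmt_stop() notes, dma_unmap is a nop on Octeon.
	 */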
	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}


static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	int port = p->port;
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;


	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs. This can happen if we
		 * increase the MTU. Buffers that are already in the
		 * rx ring can then end up being too small. As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
	return rc;

}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	int port = p->port;
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;


	mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

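/*
 * Standard NAPI contract: if less than the full budget was consumed,
 * no more packets are pending, so report completion and re-enable the
 * RX interrupt; otherwise leave the interrupt masked and let the core
 * poll again.
 */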
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	cvmx_wait(64);

	mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

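/*
 * CAM layout, as inferred from this helper and the CAM0..CAM5 writes
 * below: one 64-bit register per MAC octet, with each of up to eight
 * entries occupying one byte lane.  Adding aa:bb:cc:dd:ee:ff as entry
 * j sets bits [8j+7:8j] of cam[0] to 0xaa, of cam[1] to 0xbb, and so
 * on, and sets bit j of cam_mask to enable the entry.
 */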
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}


	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);


	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
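	/*
	 * The jabber limit must cover a whole frame; (x + 7) & 0xfff8
	 * rounds x up to the next multiple of 8, e.g. 1522 -> 1528.
	 */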
	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port),
		       cvmx_read_csr(CVMX_MIXX_ISR(port)));
	cvmx_read_csr(CVMX_MIXX_ISR(port));

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!p->phydev)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 =
				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
				       prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	char phy_id[20];

	if (octeon_is_simulation()) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

	p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (IS_ERR(p->phydev)) {
		p->phydev = NULL;
		return -1;
	}

	phy_start_aneg(p->phydev);

	return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1;        /* Enable the port */
	mix_ctl.s.nbtarb = 0;    /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 5 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 5;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);


	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_nophy;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_nophy:
	/*
	 * The IRQ was requested above, so it must be released here;
	 * jumping straight to err_noirq would leak it.
	 */
	free_irq(p->irq, netdev);
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);


	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);


	return 0;
}

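/*
 * The transmit path: map the packet, append it to tx_list and write
 * its descriptor into the TX ring under tx_list.lock, then ring the
 * ORING2 doorbell to hand the descriptor to the hardware.  Completed
 * buffers are reclaimed both here, opportunistically, and from the
 * TX-clean tasklet.
 */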
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union mgmt_port_ring_entry re;
	unsigned long flags;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);

	netdev->trans_start = jiffies;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_update_tx_stats(netdev);
	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
	return;
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_VERSION, sizeof(info->version));
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

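/*
 * One platform device is registered per management port: pdev->id
 * doubles as the port number, selecting both the MIX/AGL CSR instance
 * used throughout the driver and the offset from the board's base MAC
 * address assigned below.
 */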
static int __init octeon_mgmt_probe(struct platform_device *pdev)
{
	struct resource *res_irq;
	struct net_device *netdev;
	struct octeon_mgmt *p;
	int i;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;

	p->port = pdev->id;
	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq)
		goto err;

	p->irq = res_irq->start;
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;


	/* The mgmt ports get the first N MACs. */
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
	netdev->dev_addr[5] += p->port;

	if (p->port >= octeon_bootinfo->mac_addr_count)
		dev_err(&pdev->dev,
			"Error %s: Using MAC outside of the assigned range: %pM\n",
			netdev->name, netdev->dev_addr);

	if (register_netdev(netdev))
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;
err:
	free_netdev(netdev);
	return -ENOENT;
}

static int __exit octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.owner = THIS_MODULE,
	},
	.probe = octeon_mgmt_probe,
	.remove = __exit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);