/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_???? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */

#define DRV_NAME	"emac"
#define DRV_VERSION	"3.54"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
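/* A rough sanity check on the numbers above (a sketch assuming a 1518-byte
 * maximum frame plus 8 bytes of preamble and 12 bytes of inter-frame gap):
 * (1518 + 20) * 8 = 12304 bit times, i.e. ~1230us at 10Mbps, ~124us at
 * 100Mbps and ~13us at 1000Mbps; a 9018-byte jumbo frame plus the same
 * overhead is (9018 + 20) * 8 = 72304 bit times, i.e. ~73us at 1000Mbps.
 */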

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

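	/* The EMAC hashes each multicast address into a 64-entry group
	 * address table spread over four 16-bit GAHT registers: the top
	 * six bits of the Ethernet CRC pick one of 64 bit positions,
	 * counted MSB-first, so "bit" selects word (bit >> 4) and bit
	 * (bit & 0xf) from the left within that word.
	 */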
	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}

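/* Derive the receive mode register value from the interface flags:
 * promiscuous mode maps to PME, all-multi (or more than 32 multicast
 * groups) to multicast promiscuous mode, and any smaller multicast
 * membership to GAHT hash filtering.
 */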
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

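/* The TX request threshold is encoded in TRTR in units of 64 bytes,
 * minus one; only the field position differs between EMAC4 and the
 * older core.
 */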
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

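/* Pack the RX FIFO low/high watermarks (in FIFO entries) into RWMR;
 * EMAC4 uses wider fields (10 bits at offsets 22 and 6) than the older
 * core (9 bits at offsets 23 and 7).
 */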
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}

static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us NOT to use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFO thresholds: the first TMR1 parameter is sized to fit one
	 * MAL burst plus one FIFO entry, the second corresponds to half
	 * the TX FIFO (both are expressed in FIFO entries)
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
							EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}

static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}


static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}

/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

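	/* Reserve headroom plus 2 bytes so the 14-byte Ethernet header
	 * leaves the IP header word-aligned; the region is mapped from
	 * skb->data - 2 to keep the DMA start aligned, and the BD pointer
	 * is then advanced by 2 to the actual frame location.
	 */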
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}

static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}

/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}

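/* Returns the TAH checksum-offload TX descriptor flag when hardware
 * checksumming applies (a TAH is present and the stack requested
 * offload via CHECKSUM_PARTIAL), 0 otherwise.
 */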
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}

static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}

/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

#ifdef CONFIG_IBM_NEW_EMAC_TAH
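/* Split one DMA-mapped region into consecutive TX BDs of at most
 * MAL_MAX_TX_SIZE bytes each, starting at the slot after "slot" and
 * returning the last slot used; MAL_TX_CTRL_LAST is only set on the
 * final chunk, and only when this is the last fragment of the skb.
 */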
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */

/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}

static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

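	/* Re-mapping stands in for a CPU-to-device sync here; per the note
	 * at the top of this file, the 4xx DMA API only ensures cache
	 * coherency and unmap is a no-op, so the buffer is simply handed
	 * back to the EMAC without extra bookkeeping.
	 */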
	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}

/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

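	/* If the RX channel was stopped (by emac_rxde() or an MTU change)
	 * while we were polling, frames may have landed after we last read
	 * the ring: re-scan it, drop any partial scatter-gather packet,
	 * then restart the channel from slot 0.
	 */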
1671 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1672 mb();
1673 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1674 DBG2(dev, "rx restart" NL);
1675 received = 0;
1676 goto again;
1677 }
1678
1679 if (dev->rx_sg_skb) {
1680 DBG2(dev, "dropping partial rx packet" NL);
1681 ++dev->estats.rx_dropped_error;
1682 dev_kfree_skb(dev->rx_sg_skb);
1683 dev->rx_sg_skb = NULL;
1684 }
1685
1686 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1687 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1688 emac_rx_enable(dev);
1689 dev->rx_slot = 0;
1690 }
1691 return received;
1692}
1693
1694/* NAPI poll context */
1695static int emac_peek_rx(void *param)
1696{
1697 struct emac_instance *dev = param;
1698
1699 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1700}
1701
1702/* NAPI poll context */
1703static int emac_peek_rx_sg(void *param)
1704{
1705 struct emac_instance *dev = param;
1706
1707 int slot = dev->rx_slot;
1708 while (1) {
1709 u16 ctrl = dev->rx_desc[slot].ctrl;
1710 if (ctrl & MAL_RX_CTRL_EMPTY)
1711 return 0;
1712 else if (ctrl & MAL_RX_CTRL_LAST)
1713 return 1;
1714
1715 slot = (slot + 1) % NUM_RX_BUFF;
1716
1717 /* I'm just being paranoid here :) */
1718 if (unlikely(slot == dev->rx_slot))
1719 return 0;
1720 }
1721}
1722
1723/* Hard IRQ */
1724static void emac_rxde(void *param)
1725{
1726 struct emac_instance *dev = param;
1727
1728 ++dev->estats.rx_stopped;
1729 emac_rx_disable_async(dev);
1730}
1731
1732/* Hard IRQ */
1733static irqreturn_t emac_irq(int irq, void *dev_instance)
1734{
1735 struct emac_instance *dev = dev_instance;
1736 struct emac_regs __iomem *p = dev->emacp;
1737 struct emac_error_stats *st = &dev->estats;
1738 u32 isr;
1739
1740 spin_lock(&dev->lock);
1741
1742 isr = in_be32(&p->isr);
1743 out_be32(&p->isr, isr);
1744
1745 DBG(dev, "isr = %08x" NL, isr);
1746
1747 if (isr & EMAC4_ISR_TXPE)
1748 ++st->tx_parity;
1749 if (isr & EMAC4_ISR_RXPE)
1750 ++st->rx_parity;
1751 if (isr & EMAC4_ISR_TXUE)
1752 ++st->tx_underrun;
1753 if (isr & EMAC4_ISR_RXOE)
1754 ++st->rx_fifo_overrun;
1755 if (isr & EMAC_ISR_OVR)
1756 ++st->rx_overrun;
1757 if (isr & EMAC_ISR_BP)
1758 ++st->rx_bad_packet;
1759 if (isr & EMAC_ISR_RP)
1760 ++st->rx_runt_packet;
1761 if (isr & EMAC_ISR_SE)
1762 ++st->rx_short_event;
1763 if (isr & EMAC_ISR_ALE)
1764 ++st->rx_alignment_error;
1765 if (isr & EMAC_ISR_BFCS)
1766 ++st->rx_bad_fcs;
1767 if (isr & EMAC_ISR_PTLE)
1768 ++st->rx_packet_too_long;
1769 if (isr & EMAC_ISR_ORE)
1770 ++st->rx_out_of_range;
1771 if (isr & EMAC_ISR_IRE)
1772 ++st->rx_in_range;
1773 if (isr & EMAC_ISR_SQE)
1774 ++st->tx_sqe;
1775 if (isr & EMAC_ISR_TE)
1776 ++st->tx_errors;
1777
1778 spin_unlock(&dev->lock);
1779
1780 return IRQ_HANDLED;
1781}
1782
1783static struct net_device_stats *emac_stats(struct net_device *ndev)
1784{
1785 struct emac_instance *dev = netdev_priv(ndev);
1786 struct emac_stats *st = &dev->stats;
1787 struct emac_error_stats *est = &dev->estats;
1788 struct net_device_stats *nst = &dev->nstats;
1789 unsigned long flags;
1790
1791 DBG2(dev, "stats" NL);
1792
1793 /* Compute "legacy" statistics */
1794 spin_lock_irqsave(&dev->lock, flags);
1795 nst->rx_packets = (unsigned long)st->rx_packets;
1796 nst->rx_bytes = (unsigned long)st->rx_bytes;
1797 nst->tx_packets = (unsigned long)st->tx_packets;
1798 nst->tx_bytes = (unsigned long)st->tx_bytes;
1799 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1800 est->rx_dropped_error +
1801 est->rx_dropped_resize +
1802 est->rx_dropped_mtu);
1803 nst->tx_dropped = (unsigned long)est->tx_dropped;
1804
1805 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1806 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1807 est->rx_fifo_overrun +
1808 est->rx_overrun);
1809 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1810 est->rx_alignment_error);
1811 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1812 est->rx_bad_fcs);
1813 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1814 est->rx_bd_short_event +
1815 est->rx_bd_packet_too_long +
1816 est->rx_bd_out_of_range +
1817 est->rx_bd_in_range +
1818 est->rx_runt_packet +
1819 est->rx_short_event +
1820 est->rx_packet_too_long +
1821 est->rx_out_of_range +
1822 est->rx_in_range);
1823
1824 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1825 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1826 est->tx_underrun);
1827 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1828 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1829 est->tx_bd_excessive_collisions +
1830 est->tx_bd_late_collision +
1831 est->tx_bd_multple_collisions);
1832 spin_unlock_irqrestore(&dev->lock, flags);
1833 return nst;
1834}
1835
1836static struct mal_commac_ops emac_commac_ops = {
1837 .poll_tx = &emac_poll_tx,
1838 .poll_rx = &emac_poll_rx,
1839 .peek_rx = &emac_peek_rx,
1840 .rxde = &emac_rxde,
1841};
1842
1843static struct mal_commac_ops emac_commac_sg_ops = {
1844 .poll_tx = &emac_poll_tx,
1845 .poll_rx = &emac_poll_rx,
1846 .peek_rx = &emac_peek_rx_sg,
1847 .rxde = &emac_rxde,
1848};
1849
1850/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}

static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

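/* Derive the ethtool pause state from the PHY: pause autonegotiation is
 * reported as enabled when the PHY both supports autoneg and advertises
 * one of the pause bits, and RX/TX pause are only meaningful at full
 * duplex (symmetric pause sets both, asymmetric pause TX only).
 */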
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}

static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}

static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE;
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE;
	}
}

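/* Layout of the ethtool register dump, matching the sizes computed in
 * emac_ethtool_get_regs_len() above: a global header with a component
 * bitmap, then the MAL registers, the EMAC registers, and finally one
 * block per optional component (ZMII, RGMII, TAH) that is present.
 */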
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}

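/* Restart autonegotiation for "ethtool -r". This only makes sense when a
 * PHY is attached and autoneg is currently enabled; afterwards the link
 * timer is kicked so the new link state is picked up promptly.
 */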
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 *tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};

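/* MII ioctls. The u16 array overlaying ifr_ifru follows the legacy MII
 * ioctl layout, as also used by the private ioctls: data[0] is the PHY
 * address, data[1] the register number, data[2] the value to write and
 * data[3] the value read back.
 */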
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *)&rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

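/* Probe-time dependency tracking. Each entry maps a phandle from the
 * device tree to the bound of_device and its driver data; an EMAC cannot
 * finish probing until every dependency listed below has been resolved.
 */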
struct emac_depentry {
	u32 phandle;
	struct device_node *node;
	struct of_device *ofdev;
	void *drvdata;
};

#define EMAC_DEP_MAL_IDX	0
#define EMAC_DEP_ZMII_IDX	1
#define EMAC_DEP_RGMII_IDX	2
#define EMAC_DEP_TAH_IDX	3
#define EMAC_DEP_MDIO_IDX	4
#define EMAC_DEP_PREV_IDX	5
#define EMAC_DEP_COUNT		6

static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}

static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}

static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in devices getting bound to a driver */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};

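/* Wait for all dependent devices to be probed. A bus notifier wakes us
 * whenever any device on the of_platform bus gets bound to a driver, and
 * we re-run emac_check_deps() until everything is there or the timeout
 * expires; on success the resolved of_device pointers are stashed in the
 * emac_instance.
 */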
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}

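/* Read a single u32 property from a device node. When "fatal" is set, a
 * missing property is logged as an error; either way -ENODEV is returned
 * so callers can substitute a default for optional properties.
 */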
static int __devinit emac_read_uint_prop(struct device_node *np,
					 const char *name, u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);

	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}

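/* Locate and initialize the PHY. In the PHY-less case we simply assume a
 * fixed 100Mb full-duplex link; otherwise we scan the MII bus for a
 * responding address (honouring phy-map/busy_phy_map so that EMACs
 * sharing MDIO lines do not claim each other's PHY) and then set up
 * either autonegotiation or the best forced speed/duplex the PHY
 * supports.
 */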
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;

			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}

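/* Pull the driver configuration out of the device tree. A hypothetical
 * EMAC node carrying the properties read below (all values here are
 * made up; real ones are board-specific) might look like:
 *
 *	EMAC0: ethernet@ef600800 {
 *		device_type = "network";
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		reg = <ef600800 70>;
 *		local-mac-address = [00 04 ac e3 1b 2c];
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <5dc>;
 *		rx-fifo-size = <1000>;
 *		tx-fifo-size = <800>;
 *		phy-mode = "rgmii";
 *		phy-map = <00000000>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 *
 * Optional properties fall back to the defaults coded below.
 */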
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	unsigned int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige",
				&dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige",
				&dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency",
				&dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		int i;

		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;

		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4"))
		dev->features |= EMAC_FTR_EMAC4;

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled!\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled!\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled!\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	DBG(dev, "features     : 0x%08x / 0x%08x\n",
	    dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n",
	    dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n",
	    dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}

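/* Main probe path. In order: parse the device-tree configuration, map the
 * interrupts and registers, wait for the MAL/ZMII/RGMII/TAH/MDIO devices
 * we depend on, register with the MAL and attach to the bridge devices,
 * find the PHY, and finally register the net_device. Each step unwinds
 * through the error labels at the bottom in reverse order.
 */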
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACs */
	if (of_get_property(np, "unused", NULL))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n",
		       np->full_name);
		err = -ENODEV;
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		err = -ENXIO;
		goto err_irq_unmap;
	}
	/* TODO: request_mem_region */
	dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/* display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port,
			       &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port,
				dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* Fill in the driver function table */
	ndev->open = &emac_open;
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
#endif
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want it visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	free_netdev(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}

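/* Device teardown: the mirror image of emac_probe(). The net_device is
 * unregistered first so no new traffic can arrive, pending work is
 * flushed, and then the bridge attachments, MAL registration, mappings
 * and the net_device itself are released.
 */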
static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);
	flush_scheduled_work();

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}

/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type = "network",
		.compatible = "ibm,emac",
	},
	{
		.type = "network",
		.compatible = "ibm,emac4",
	},
	{},
};

static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};

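/* Build the list of EMAC nodes sorted by cell-index, so interface probe
 * order (and the "previous EMAC" dependency used above) matches the
 * hardware numbering rather than device-tree traversal order.
 */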
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while ((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}

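/* Module init: bring up the shared submodules (MAL, then the optional
 * ZMII/RGMII/TAH bridges) before registering the EMAC driver itself, and
 * unwind them in reverse order if any step fails; the exit path below
 * tears everything down in the same reverse order.
 */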
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}

static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);