]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/ibm_newemac/core.c
ibm_newemac: Fix section mismatch warnings
[net-next-2.6.git] / drivers / net / ibm_newemac / core.c
CommitLineData
1d3bb996
DG
1/*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
17cf803a
BH
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
1d3bb996
DG
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
27#include <linux/sched.h>
28#include <linux/string.h>
29#include <linux/errno.h>
30#include <linux/delay.h>
31#include <linux/types.h>
32#include <linux/pci.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/crc32.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#include <linux/bitops.h>
39#include <linux/workqueue.h>
283029d1 40#include <linux/of.h>
1d3bb996
DG
41
42#include <asm/processor.h>
43#include <asm/io.h>
44#include <asm/dma.h>
45#include <asm/uaccess.h>
46
47#include "core.h"
48
49/*
50 * Lack of dma_unmap_???? calls is intentional.
51 *
52 * API-correct usage requires additional support state information to be
53 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
54 * EMAC design (e.g. TX buffer passed from network stack can be split into
55 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
56 * maintaining such information will add additional overhead.
57 * Current DMA API implementation for 4xx processors only ensures cache coherency
58 * and dma_unmap_???? routines are empty and are likely to stay this way.
59 * I decided to omit dma_unmap_??? calls because I don't want to add additional
60 * complexity just for the sake of following some abstract API, when it doesn't
61 * add any real benefit to the driver. I understand that this decision maybe
62 * controversial, but I really tried to make code API-correct and efficient
63 * at the same time and didn't come up with code I liked :(. --ebs
64 */
65
66#define DRV_NAME "emac"
67#define DRV_VERSION "3.54"
68#define DRV_DESC "PPC 4xx OCP EMAC driver"
69
70MODULE_DESCRIPTION(DRV_DESC);
71MODULE_AUTHOR
72 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
73MODULE_LICENSE("GPL");
74
75/*
76 * PPC64 doesn't (yet) have a cacheable_memcpy
77 */
78#ifdef CONFIG_PPC64
79#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
80#endif
81
82/* minimum number of free TX descriptors required to wake up TX process */
83#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
84
85/* If packet size is less than this number, we allocate small skb and copy packet
86 * contents into it instead of just sending original big skb up
87 */
88#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
89
90/* Since multiple EMACs share MDIO lines in various ways, we need
91 * to avoid re-using the same PHY ID in cases where the arch didn't
92 * setup precise phy_map entries
93 *
94 * XXX This is something that needs to be reworked as we can have multiple
95 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96 * probably require in that case to have explicit PHY IDs in the device-tree
97 */
98static u32 busy_phy_map;
99static DEFINE_MUTEX(emac_phy_map_lock);
100
101/* This is the wait queue used to wait on any event related to probe, that
102 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103 */
104static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105
106/* Having stable interface names is a doomed idea. However, it would be nice
107 * if we didn't have completely random interface names at boot too :-) It's
108 * just a matter of making everybody's life easier. Since we are doing
109 * threaded probing, it's a bit harder though. The base idea here is that
110 * we make up a list of all emacs in the device-tree before we register the
111 * driver. Every emac will then wait for the previous one in the list to
112 * initialize before itself. We should also keep that list ordered by
113 * cell_index.
114 * That list is only 4 entries long, meaning that additional EMACs don't
115 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
116 */
117
118#define EMAC_BOOT_LIST_SIZE 4
119static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120
121/* How long should I wait for dependent devices ? */
122#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
123
124/* I don't want to litter system log with timeout errors
125 * when we have brain-damaged PHY.
126 */
127static inline void emac_report_timeout_error(struct emac_instance *dev,
128 const char *error)
129{
130 if (net_ratelimit())
131 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
132}
133
134/* PHY polling intervals */
135#define PHY_POLL_LINK_ON HZ
136#define PHY_POLL_LINK_OFF (HZ / 5)
137
138/* Graceful stop timeouts in us.
139 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
140 */
141#define STOP_TIMEOUT_10 1230
142#define STOP_TIMEOUT_100 124
143#define STOP_TIMEOUT_1000 13
144#define STOP_TIMEOUT_1000_JUMBO 73
145
/* IEEE 802.3x pause-frame destination address 01:80:C2:00:00:01; added to
 * the MC filter in emac_configure() so flow-control frames are received.
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
149
1d3bb996
DG
150/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
151static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
152 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
153 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
154 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
155 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
156 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
157 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
158 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
159 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
160 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
161 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
162 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
163 "tx_bd_excessive_collisions", "tx_bd_late_collision",
164 "tx_bd_multple_collisions", "tx_bd_single_collision",
165 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
166 "tx_errors"
167};
168
169static irqreturn_t emac_irq(int irq, void *dev_instance);
170static void emac_clean_tx_ring(struct emac_instance *dev);
171static void __emac_set_multicast_list(struct emac_instance *dev);
172
173static inline int emac_phy_supports_gige(int phy_mode)
174{
175 return phy_mode == PHY_MODE_GMII ||
176 phy_mode == PHY_MODE_RGMII ||
177 phy_mode == PHY_MODE_TBI ||
178 phy_mode == PHY_MODE_RTBI;
179}
180
181static inline int emac_phy_gpcs(int phy_mode)
182{
183 return phy_mode == PHY_MODE_TBI ||
184 phy_mode == PHY_MODE_RTBI;
185}
186
/* Enable the EMAC transmitter by setting MR0[TXE] (read-modify-write);
 * the MMIO write is skipped if TXE is already set.
 */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
198
/* Disable the EMAC transmitter: clear MR0[TXE] and busy-wait (1us steps,
 * up to dev->stop_timeout iterations) for the TX-idle indication MR0[TXI].
 * Logs a rate-limited error if the graceful stop never completes.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		/* stop_timeout is scaled to ~one frame time for the
		 * current link speed (see STOP_TIMEOUT_* above) */
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
218
/* Enable the EMAC receiver (MR0[RXE]).  If a previous asynchronous
 * disable (emac_rx_disable_async()) is still draining, wait for the
 * RX-idle bit MR0[RXI] first.  Does nothing while MAL_COMMAC_RX_STOPPED
 * is set, which callers use to hold RX off across ring reconfiguration.
 */
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			/* NOTE(review): '&' binds tighter than '=', so after
			 * this loop r holds only the masked RXI bit, not the
			 * full MR0 value, and the write below programs just
			 * RXI|RXE.  Long-standing upstream behavior -- confirm
			 * against the EMAC core manual before changing.
			 */
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
247
/* Disable the EMAC receiver: clear MR0[RXE] and busy-wait (1us steps,
 * up to dev->stop_timeout iterations) for the RX-idle indication MR0[RXI].
 * Logs a rate-limited error if the graceful stop never completes.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
267
/* Quiesce the network interface: defer multicast-filter updates
 * (no_mcast), stop MAL/NAPI polling and disable the TX queue.
 * Paired with emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;	/* emac_set_multicast_list() will just latch */
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
277
/* Undo emac_netif_stop(): re-allow multicast updates (applying any update
 * that was latched while stopped), wake the TX queue and re-enable
 * MAL/NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 * not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
295
/* Fire-and-forget RX disable: clear MR0[RXE] without waiting for the
 * RX-idle bit.  emac_rx_enable() later waits for MR0[RXI] if needed.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
307
/* Soft-reset the EMAC core via MR0[SRST], polling up to 20 times for the
 * bit to self-clear.  Returns 0 on success, -ETIMEDOUT otherwise; the
 * outcome is latched in dev->reset_failed so a wedged core is not
 * gracefully stopped again on the next attempt.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
336
/* Program the four 16-bit group-address hash tables (GAHT1..4) from the
 * netdev multicast list.  Each address selects one of 64 buckets via the
 * top 6 bits of its ethernet CRC.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* bucket = 63 - top 6 CRC bits; set one bit per bucket */
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
359
360static inline u32 emac_iff2rmr(struct net_device *ndev)
361{
362 struct emac_instance *dev = netdev_priv(ndev);
363 u32 r;
364
365 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
366
367 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
368 r |= EMAC4_RMR_BASE;
369 else
370 r |= EMAC_RMR_BASE;
371
372 if (ndev->flags & IFF_PROMISC)
373 r |= EMAC_RMR_PME;
374 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
375 r |= EMAC_RMR_PMME;
376 else if (ndev->mc_count > 0)
377 r |= EMAC_RMR_MAE;
378
379 return r;
380}
381
382static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
383{
384 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
385
386 DBG2(dev, "__emac_calc_base_mr1" NL);
387
388 switch(tx_size) {
389 case 2048:
390 ret |= EMAC_MR1_TFS_2K;
391 break;
392 default:
393 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
394 dev->ndev->name, tx_size);
395 }
396
397 switch(rx_size) {
398 case 16384:
399 ret |= EMAC_MR1_RFS_16K;
400 break;
401 case 4096:
402 ret |= EMAC_MR1_RFS_4K;
403 break;
404 default:
405 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
406 dev->ndev->name, rx_size);
407 }
408
409 return ret;
410}
411
412static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
413{
414 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
4696c3c4 415 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
1d3bb996
DG
416
417 DBG2(dev, "__emac4_calc_base_mr1" NL);
418
419 switch(tx_size) {
420 case 4096:
421 ret |= EMAC4_MR1_TFS_4K;
422 break;
423 case 2048:
424 ret |= EMAC4_MR1_TFS_2K;
425 break;
426 default:
427 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
428 dev->ndev->name, tx_size);
429 }
430
431 switch(rx_size) {
432 case 16384:
433 ret |= EMAC4_MR1_RFS_16K;
434 break;
435 case 4096:
436 ret |= EMAC4_MR1_RFS_4K;
437 break;
438 case 2048:
439 ret |= EMAC4_MR1_RFS_2K;
440 break;
441 default:
442 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
443 dev->ndev->name, rx_size);
444 }
445
446 return ret;
447}
448
449static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
450{
451 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
452 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
453 __emac_calc_base_mr1(dev, tx_size, rx_size);
454}
455
456static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
457{
458 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
459 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
460 else
461 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
462}
463
464static inline u32 emac_calc_rwmr(struct emac_instance *dev,
465 unsigned int low, unsigned int high)
466{
467 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
468 return (low << 22) | ( (high & 0x3ff) << 6);
469 else
470 return (low << 23) | ( (high & 0x1ff) << 7);
471}
472
/* Fully (re)program the EMAC for the current link state: reset the core
 * (or force loopback when there is no link), build and write MR1, set the
 * MAC address, VLAN TPID, receive mode, FIFO thresholds, water marks,
 * PAUSE timer and interrupt sources.  Returns 0, or -ETIMEDOUT if the
 * core reset fails.  Register write order follows the original driver
 * and should not be rearranged casually.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	/* No carrier: put the MAC in internal loopback instead of doing a
	 * full reset; with carrier, a failed reset aborts configuration. */
	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* Jumbo frames: pick the core-specific JPSM bit */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						 EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
633
/* Stop the interface, reprogram the core and restart it.  TX/RX are only
 * re-enabled when emac_configure() succeeded.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
645
/* Heavy-weight TX recovery: stop the transmitter and its MAL channel,
 * drop everything in the TX ring, reprogram the core, then re-enable
 * the channel plus TX and RX.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
661
/* Workqueue handler scheduled from emac_tx_timeout(); performs a full TX
 * reset under link_lock, but only while the device is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
676
/* netdev tx_timeout hook: defer the actual recovery to emac_reset_work()
 * since a full reset sleeps (mutex) and cannot run in this context.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
685
686
687static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
688{
689 int done = !!(stacr & EMAC_STACR_OC);
690
691 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
692 done = !done;
693
694 return done;
695};
696
/* Read 16-bit PHY register 'reg' of PHY 'id' through the EMAC MDIO
 * interface (STACR register).  Returns the register value, or a negative
 * errno on bus-idle timeout, completion timeout or PHY error.  The whole
 * transaction is serialized by dev->mdio_lock, and the MDIO lines are
 * routed through ZMII/RGMII for the duration when those bridges exist.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from the final STACR value */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release MDIO routing in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
767
/* Write 16-bit value 'val' to PHY register 'reg' of PHY 'id' through the
 * EMAC MDIO interface (STACR register).  Timeouts are logged at debug
 * level only -- the write is best-effort and returns no status.
 * Serialized by dev->mdio_lock, with MDIO routed through ZMII/RGMII
 * bridges when present.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release MDIO routing in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
828
829static int emac_mdio_read(struct net_device *ndev, int id, int reg)
830{
831 struct emac_instance *dev = netdev_priv(ndev);
832 int res;
833
834 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
835 (u8) id, (u8) reg);
836 return res;
837}
838
839static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
840{
841 struct emac_instance *dev = netdev_priv(ndev);
842
843 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
844 (u8) id, (u8) reg, (u16) val);
845}
846
/* Tx lock BH.
 * Apply the current multicast configuration to the RMR/GAHT registers.
 * Only RX is briefly stopped (see rationale below) rather than doing a
 * full EMAC reset.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
879
/* Tx lock BH.
 * netdev set_multicast_list hook: while the interface is quiesced
 * (no_mcast set by emac_netif_stop()), just latch the request; it is
 * replayed by emac_netif_start().
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
895
/* Rebuild the RX ring for a new MTU: drop in-flight packets, reallocate
 * skbs only when the new MTU needs bigger buffers, toggle the jumbo bit
 * via a full TX reset when crossing ETH_DATA_LEN, and reprogram the MAL
 * channel buffer size.  Runs under link_lock with the interface stopped.
 * Returns 0 or -ENOMEM (on -ENOMEM the ring is restarted with whatever
 * buffers were allocated so far).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* A partially assembled scatter-gather packet cannot survive the
	 * resize; count it as dropped */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
969
970/* Process ctx, rtnl_lock semaphore */
971static int emac_change_mtu(struct net_device *ndev, int new_mtu)
972{
973 struct emac_instance *dev = netdev_priv(ndev);
974 int ret = 0;
975
976 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
977 return -EINVAL;
978
979 DBG(dev, "change_mtu(%d)" NL, new_mtu);
980
981 if (netif_running(ndev)) {
982 /* Check if we really need to reinitalize RX ring */
983 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
984 ret = emac_resize_rx_ring(dev, new_mtu);
985 }
986
987 if (!ret) {
988 ndev->mtu = new_mtu;
989 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
990 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
991 }
992
993 return ret;
994}
995
/* Free every skb still held in the TX ring and clear the descriptors;
 * entries that were still marked READY (not yet sent) are counted as
 * tx_dropped.
 */
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
1011
/* Free every skb in the RX ring (and any partial scatter-gather packet)
 * and clear the descriptors.
 */
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
1029
/* Allocate and DMA-map a fresh receive skb for ring slot 'slot', then mark
 * the descriptor EMPTY (ready for hardware).  Returns 0 or -ENOMEM.
 * The -2/+2 offset around the DMA mapping presumably keeps the IP header
 * word-aligned after the 14-byte ethernet header -- TODO confirm against
 * the MAL/EMAC alignment requirements.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Ensure data_ptr is visible before hardware sees EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1050
/* Log the current carrier state; when up, include speed, duplex and
 * which pause mode (if any) was negotiated.
 */
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
1062
/* Process ctx, rtnl_lock semaphore.
 *
 * net_device open callback: request the error IRQ, populate the RX
 * ring, then (under link_lock) start PHY link polling, configure the
 * MAC and enable the MAL channels and RX/TX paths.
 *
 * Returns 0 on success, a negative errno on IRQ or memory failure.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	/* opened must be set under link_lock so the link timer sees a
	 * consistent state (it bails out when !opened) */
	dev->opened = 1;

	/* Start PHY polling now. */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY: assume the link is always up */
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1131
/* BHs disabled */
#if 0
/* Dead code, kept for reference: decode speed/duplex/pause from the
 * MR1 register and report whether it differs from the cached PHY
 * state.  Currently compiled out (#if 0).
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1164
/* Delayed-work handler that polls the PHY for link changes.
 *
 * On link-up it rereads the link parameters and performs a full TX
 * reset; on link-down it reinitializes the EMAC.  Reschedules itself
 * with a faster interval while the link is down.  Serialized against
 * open/close via link_lock; bails out once dev->opened is cleared.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed under us: do nothing */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1203
/* Force an immediate link re-check by cancelling the pending link
 * work and rescheduling it with the short (link-off) interval.
 *
 * The smp_rmb() pairs with the wmb() in emac_open() so link_polling
 * is read after the carrier state change; link_polling is re-tested
 * after the cancel in case emac_close() cleared it concurrently.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}
1214
/* Process ctx, rtnl_lock semaphore.
 *
 * net_device stop callback: stop link polling, quiesce the interface
 * under link_lock (clearing dev->opened so the link timer bails),
 * disable RX/TX and the MAL channels, then free ring buffers and the
 * error IRQ.  Always returns 0.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
1244
/* Return the TX descriptor control bits needed for hardware checksum
 * offload of @skb, or 0 when offload does not apply.
 *
 * Offload is only requested when a TAH is present and the stack asked
 * for partial checksumming (CHECKSUM_PARTIAL).
 */
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
1255
/* Common tail of the xmit paths: kick the transmitter via TMR0, stop
 * the queue when the TX ring just became full, and update stats.
 * Always returns 0 (packet accepted).
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring is full once tx_cnt reaches NUM_TX_BUFF; emac_poll_tx()
	 * wakes the queue again as slots complete */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
1281
/* Tx lock BH.
 *
 * hard_start_xmit for the simple (non-scatter/gather) case: place the
 * whole skb in one TX descriptor.  The wmb() orders data_ptr/data_len
 * writes before the ctrl write that hands the slot to the hardware.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1310
1d3bb996
DG
/* Split a DMA region (@pd, @len) into MAL_MAX_TX_SIZE-sized chunks,
 * filling consecutive TX descriptors starting after @slot.
 *
 * @last:      true when this region ends the frame (sets
 *             MAL_TX_CTRL_LAST on the final chunk)
 * @base_ctrl: control bits common to every chunk
 *
 * Returns the index of the last descriptor written.  Note the chunks
 * are handed to hardware with base_ctrl (which includes READY in the
 * callers); the caller kicks the first descriptor separately.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1339
/* Tx lock BH disabled (SG version for TAH equipped EMACs).
 *
 * Scatter/gather hard_start_xmit: maps the linear part and each page
 * fragment, splitting anything larger than MAL_MAX_TX_SIZE across
 * descriptors via emac_xmit_split().  The initial slot-count check is
 * only an estimate; if we run out of descriptors mid-frame the partly
 * filled slots are rolled back (undo_frame) and the queue is stopped.
 * Returns 0 on success, 1 when the frame could not be queued.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out: the first descriptor is armed last so the
	 * hardware only sees a fully built chain */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
1d3bb996
DG
1423
/* Tx lock BHs.
 *
 * Decode a bad TX descriptor status word into the per-error counters.
 */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
1451
/* MAL poll callback: reap completed TX descriptors.
 *
 * Walks the ring from ack_slot, freeing skbs whose descriptors the
 * hardware has released (READY cleared), accounting bad-status frames,
 * and waking the TX queue once occupancy drops below the wakeup
 * threshold.  Runs under netif_tx_lock_bh to serialize with xmit.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs report errors differently */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* SG frames keep their skb on the last slot only,
			 * so intermediate slots have no skb to free */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1497
/* Return RX slot @slot to the hardware, reusing its existing skb.
 *
 * @len is the number of bytes the CPU may have touched; when non-zero
 * the buffer is re-mapped for device access (same -2 offset scheme as
 * emac_alloc_rx_skb()).  The wmb() orders the data_len reset before
 * the ctrl write that marks the slot empty.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1514
/* Decode a bad RX descriptor status word into the per-error counters. */
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
1541
/* Mark @skb as checksum-verified when the TAH reported a clean frame
 * (@ctrl == 0 means no TAH checksum error bits were set).  Compiled
 * away entirely when TAH support is not configured.
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1552
/* Append the data in RX slot @slot to the in-progress scatter/gather
 * packet (dev->rx_sg_skb), then recycle the slot.
 *
 * Returns 0 on success; -1 when there is no SG packet in progress or
 * the packet would exceed rx_skb_size (in which case it is dropped and
 * counted in rx_dropped_mtu).
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1574
/* NAPI poll context.
 *
 * Main RX path: consume up to @budget filled descriptors starting at
 * dev->rx_slot and push the packets up the stack.
 *
 * Single-descriptor frames are either copied into a fresh small skb
 * (below EMAC_RX_COPY_THRESH, recycling the ring skb) or handed up
 * directly with a replacement skb allocated for the slot.  Frames that
 * span multiple descriptors (jumbo) are reassembled into rx_sg_skb via
 * emac_rx_sg_append().  If the channel was stopped by an RXDE event
 * (MAL_COMMAC_RX_STOPPED), the ring is drained and RX is restarted
 * from slot 0.  Returns the number of descriptors processed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after seeing the slot non-empty */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Small frame: copy out and keep the ring skb */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Multi-descriptor frame handling */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* Channel was stopped by RXDE: drain leftovers and restart */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1700
1701/* NAPI poll context */
1702static int emac_peek_rx(void *param)
1703{
1704 struct emac_instance *dev = param;
1705
1706 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1707}
1708
/* NAPI poll context.
 *
 * Scatter/gather variant of emac_peek_rx(): only report work when at
 * least one *complete* frame (a descriptor with MAL_RX_CTRL_LAST) is
 * available, scanning forward from rx_slot until an empty slot or a
 * full wrap of the ring.
 */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
1729
1730/* Hard IRQ */
1731static void emac_rxde(void *param)
1732{
1733 struct emac_instance *dev = param;
1734
1735 ++dev->estats.rx_stopped;
1736 emac_rx_disable_async(dev);
1737}
1738
/* Hard IRQ.
 *
 * EMAC error interrupt handler: read and acknowledge ISR (write-back
 * clears the bits), then bump the matching error counters under
 * dev->lock.  Always returns IRQ_HANDLED.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	/* Writing the value back acknowledges the interrupts */
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1789
/* net_device get_stats callback: fold the driver's internal 64-bit
 * counters (stats/estats) into the "legacy" net_device_stats structure
 * under dev->lock so the snapshot is consistent with the IRQ path.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1842
/* MAL poll callbacks for the normal (single-descriptor) RX path */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

/* Same as above, but peek_rx waits for a complete SG frame (jumbo) */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1856
/* Ethtool support */

/* ethtool get_settings: report PHY capabilities and (under link_lock)
 * the currently negotiated advertising/autoneg/speed/duplex state.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* Negative PHY address means the internal/fixed PHY is in use */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
1878
/* ethtool set_settings: validate the requested speed/duplex/autoneg
 * against the PHY's feature mask, then either force the link settings
 * or restart autonegotiation with the filtered advertising mask
 * (preserving the driver's pause advertisement).  Finally kicks the
 * link poller via emac_force_link_update().
 *
 * Returns 0 on success, -EOPNOTSUPP without a PHY, -EINVAL on any
 * unsupported combination.
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the exact speed/duplex pair must be in
		 * the PHY's supported-features mask */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
1949
1950static void emac_ethtool_get_ringparam(struct net_device *ndev,
1951 struct ethtool_ringparam *rp)
1952{
1953 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1954 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1955}
1956
/* ethtool get_pauseparam: derive the pause configuration from the
 * cached PHY state under link_lock.  Symmetric pause enables both
 * directions; asymmetric pause (on full duplex) enables TX pause only.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
1975
1976static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1977{
1978 struct emac_instance *dev = netdev_priv(ndev);
1979
eb4d84f1 1980 return dev->tah_dev != NULL;
1d3bb996
DG
1981}
1982
1983static int emac_get_regs_len(struct emac_instance *dev)
1984{
1985 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1986 return sizeof(struct emac_ethtool_regs_subhdr) +
1987 EMAC4_ETHTOOL_REGS_SIZE;
1988 else
1989 return sizeof(struct emac_ethtool_regs_subhdr) +
1990 EMAC_ETHTOOL_REGS_SIZE;
1991}
1992
/* ethtool get_regs_len: total dump size = header + EMAC registers +
 * MAL registers + one section per attached helper cell (ZMII, RGMII,
 * TAH), matching the layout produced by emac_ethtool_get_regs().
 */
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
2009
/* Copy this EMAC's register block (with its subheader) into @buf.
 * Returns the advanced buffer pointer, for chaining with the other
 * register-dump helpers in emac_ethtool_get_regs().
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}
2025
/* ethtool get_regs: fill @buf with the header, then the MAL and EMAC
 * register dumps, then one section per attached helper cell, setting
 * the matching component flag in the header for each.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2050
/* ethtool nway_reset: restart autonegotiation with the current
 * advertising mask and force a link re-check.
 *
 * Returns -EOPNOTSUPP without a PHY, -EINVAL when autoneg is off.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
2073
/* ethtool get_stats_count: number of u64 statistics we export */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2078
/* ethtool get_strings: copy the statistics key names for ETH_SS_STATS */
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
2085
/* ethtool get_ethtool_stats: dump the regular stats followed by the
 * error stats as a flat u64 array (order must match emac_stats_keys).
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2096
/* ethtool get_drvinfo: driver name/version and a bus-info string built
 * from the cell index and the device-tree node path.
 */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
2110
/* ethtool operations table for EMAC network devices */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2134
/* net_device ioctl handler: legacy MII ioctls (SIOCGMIIPHY/REG,
 * SIOCSMIIREG and their old SIOCDEVPRIVATE aliases) implemented via
 * emac_mdio_read/write.  Returns -EOPNOTSUPP without a PHY.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	/* MII ioctl data lives at the start of ifr_ifru as u16 words:
	 * [0]=phy id, [1]=reg number, [2]=value in, [3]=value out */
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2165
/* Tracks one probe-time dependency of an EMAC instance (MAL, ZMII,
 * RGMII, TAH, MDIO, or the previous EMAC in the probe list).  The
 * fields are resolved progressively by emac_check_deps():
 * phandle -> device node -> of_device -> bound driver data.
 */
struct emac_depentry {
	u32 phandle;		/* phandle from the device tree, 0 = no dep */
	struct device_node *node;
	struct of_device *ofdev;
	void *drvdata;		/* non-NULL once the dependency's driver is bound */
};

/* Indices into the dependency array used by emac_check_deps() et al. */
#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
2180
/*
 * Try to resolve every entry in @deps: phandle -> device_node ->
 * of_device -> bound driver data. Returns non-zero once all
 * EMAC_DEP_COUNT entries are satisfied; used as the wait_event
 * predicate in emac_wait_deps(), so it is called repeatedly and makes
 * incremental progress (already-resolved fields are kept).
 *
 * References taken here (of_node_get/of_find_* / of_find_device_by_node)
 * are released by emac_wait_deps() or kept in the emac_instance.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				/* previous EMAC vanished from the boot list:
				 * drop the dependency for good */
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
2219
/* Release the of_device references stashed in the emac_instance by
 * emac_wait_deps(). Safe to call with any subset of them NULL. */
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
2233
/*
 * OF platform bus notifier callback: wake any probe sleeping in
 * emac_wait_deps() whenever a driver binds to a device, so the
 * dependency check can be re-run.
 */
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in driver-bound events */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2242
51d4a1cc 2243static struct notifier_block emac_of_bus_notifier __devinitdata = {
1d3bb996
DG
2244 .notifier_call = emac_of_bus_notify
2245};
2246
/*
 * Wait (up to EMAC_PROBE_DEP_TIMEOUT) for all devices this EMAC depends
 * on (MAL, optional ZMII/RGMII/TAH/MDIO, and the previous EMAC in the
 * boot list) to be probed. A bus notifier re-triggers the check each
 * time a driver binds.
 *
 * On success (0), ownership of the of_device references moves into
 * @dev (released later via emac_put_deps()); on -ENODEV all references
 * are dropped here. The node references are always dropped here.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		/* marker value: the PREV dep is resolved via the boot list,
		 * not via a real phandle (see emac_check_deps()) */
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* the previous-EMAC reference is never kept */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2286
2287static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2288 u32 *val, int fatal)
2289{
2290 int len;
2291 const u32 *prop = of_get_property(np, name, &len);
2292 if (prop == NULL || len < sizeof(u32)) {
2293 if (fatal)
2294 printk(KERN_ERR "%s: missing %s property\n",
2295 np->full_name, name);
2296 return -ENODEV;
2297 }
2298 *val = *prop;
2299 return 0;
2300}
2301
/*
 * Locate and initialize the PHY for this EMAC.
 *
 * If neither "phy-address" nor "phy-map" was given in the device tree
 * (both left at 0xffffffff), the interface is run PHY-less at a fixed
 * 100/Full. Otherwise the MDIO bus is scanned (restricted by phy-map /
 * phy-address) for a responding PHY, which is then probed and either
 * set to autonegotiate or forced to its best supported speed/duplex.
 *
 * Returns 0 on success, -ENXIO when no PHY answers on the bus.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* Fixed link parameters used in place of a real PHY */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	/* busy_phy_map is shared between EMAC instances; lock while scanning */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* An explicit phy-address narrows the scan to that single address:
	 * every bit except phy_address is marked busy in the mask */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	/* Scan MDIO addresses 0..0x1f; a set bit in phy_map means "skip" */
	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2411
2412static int __devinit emac_init_config(struct emac_instance *dev)
2413{
2414 struct device_node *np = dev->ofdev->node;
2415 const void *p;
2416 unsigned int plen;
2417 const char *pm, *phy_modes[] = {
2418 [PHY_MODE_NA] = "",
2419 [PHY_MODE_MII] = "mii",
2420 [PHY_MODE_RMII] = "rmii",
2421 [PHY_MODE_SMII] = "smii",
2422 [PHY_MODE_RGMII] = "rgmii",
2423 [PHY_MODE_TBI] = "tbi",
2424 [PHY_MODE_GMII] = "gmii",
2425 [PHY_MODE_RTBI] = "rtbi",
2426 [PHY_MODE_SGMII] = "sgmii",
2427 };
2428
2429 /* Read config from device-tree */
2430 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2431 return -ENXIO;
2432 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2433 return -ENXIO;
2434 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2435 return -ENXIO;
2436 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2437 return -ENXIO;
2438 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2439 dev->max_mtu = 1500;
2440 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2441 dev->rx_fifo_size = 2048;
2442 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2443 dev->tx_fifo_size = 2048;
2444 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2445 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2446 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2447 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2448 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2449 dev->phy_address = 0xffffffff;
2450 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2451 dev->phy_map = 0xffffffff;
2452 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2453 return -ENXIO;
2454 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2455 dev->tah_ph = 0;
2456 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
63b6cad7 2457 dev->tah_port = 0;
1d3bb996
DG
2458 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2459 dev->mdio_ph = 0;
2460 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2461 dev->zmii_ph = 0;;
2462 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2463 dev->zmii_port = 0xffffffff;;
2464 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2465 dev->rgmii_ph = 0;;
2466 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2467 dev->rgmii_port = 0xffffffff;;
2468 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2469 dev->fifo_entry_size = 16;
2470 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2471 dev->mal_burst_size = 256;
2472
2473 /* PHY mode needs some decoding */
2474 dev->phy_mode = PHY_MODE_NA;
2475 pm = of_get_property(np, "phy-mode", &plen);
2476 if (pm != NULL) {
2477 int i;
2478 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2479 if (!strcasecmp(pm, phy_modes[i])) {
2480 dev->phy_mode = i;
2481 break;
2482 }
2483 }
2484
2485 /* Backward compat with non-final DT */
2486 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2487 u32 nmode = *(const u32 *)pm;
2488 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2489 dev->phy_mode = nmode;
2490 }
2491
2492 /* Check EMAC version */
2493 if (of_device_is_compatible(np, "ibm,emac4"))
2494 dev->features |= EMAC_FTR_EMAC4;
bff713b5
BH
2495
2496 /* Fixup some feature bits based on the device tree */
2497 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
1d3bb996 2498 dev->features |= EMAC_FTR_STACR_OC_INVERT;
bff713b5
BH
2499 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2500 dev->features |= EMAC_FTR_HAS_NEW_STACR;
1d3bb996 2501
bff713b5
BH
2502 /* CAB lacks the appropriate properties */
2503 if (of_device_is_compatible(np, "ibm,emac-axon"))
2504 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2505 EMAC_FTR_STACR_OC_INVERT;
2506
2507 /* Enable TAH/ZMII/RGMII features as found */
1d3bb996
DG
2508 if (dev->tah_ph != 0) {
2509#ifdef CONFIG_IBM_NEW_EMAC_TAH
2510 dev->features |= EMAC_FTR_HAS_TAH;
2511#else
2512 printk(KERN_ERR "%s: TAH support not enabled !\n",
2513 np->full_name);
2514 return -ENXIO;
2515#endif
2516 }
2517
2518 if (dev->zmii_ph != 0) {
2519#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2520 dev->features |= EMAC_FTR_HAS_ZMII;
2521#else
2522 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2523 np->full_name);
2524 return -ENXIO;
2525#endif
2526 }
2527
2528 if (dev->rgmii_ph != 0) {
2529#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2530 dev->features |= EMAC_FTR_HAS_RGMII;
2531#else
2532 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2533 np->full_name);
2534 return -ENXIO;
2535#endif
2536 }
2537
2538 /* Read MAC-address */
2539 p = of_get_property(np, "local-mac-address", NULL);
2540 if (p == NULL) {
2541 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2542 np->full_name);
2543 return -ENXIO;
2544 }
2545 memcpy(dev->ndev->dev_addr, p, 6);
2546
2547 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2548 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2549 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2550 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2551 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2552
2553 return 0;
2554}
2555
2556static int __devinit emac_probe(struct of_device *ofdev,
2557 const struct of_device_id *match)
2558{
2559 struct net_device *ndev;
2560 struct emac_instance *dev;
2561 struct device_node *np = ofdev->node;
2562 struct device_node **blist = NULL;
2563 int err, i;
2564
3d722562
HB
2565 /* Skip unused/unwired EMACS */
2566 if (of_get_property(np, "unused", NULL))
2567 return -ENODEV;
2568
1d3bb996
DG
2569 /* Find ourselves in the bootlist if we are there */
2570 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2571 if (emac_boot_list[i] == np)
2572 blist = &emac_boot_list[i];
2573
2574 /* Allocate our net_device structure */
2575 err = -ENOMEM;
2576 ndev = alloc_etherdev(sizeof(struct emac_instance));
2577 if (!ndev) {
2578 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2579 np->full_name);
2580 goto err_gone;
2581 }
2582 dev = netdev_priv(ndev);
2583 dev->ndev = ndev;
2584 dev->ofdev = ofdev;
2585 dev->blist = blist;
1d3bb996
DG
2586 SET_NETDEV_DEV(ndev, &ofdev->dev);
2587
2588 /* Initialize some embedded data structures */
2589 mutex_init(&dev->mdio_lock);
2590 mutex_init(&dev->link_lock);
2591 spin_lock_init(&dev->lock);
2592 INIT_WORK(&dev->reset_work, emac_reset_work);
2593
2594 /* Init various config data based on device-tree */
2595 err = emac_init_config(dev);
2596 if (err != 0)
2597 goto err_free;
2598
2599 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2600 dev->emac_irq = irq_of_parse_and_map(np, 0);
2601 dev->wol_irq = irq_of_parse_and_map(np, 1);
2602 if (dev->emac_irq == NO_IRQ) {
2603 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2604 goto err_free;
2605 }
2606 ndev->irq = dev->emac_irq;
2607
2608 /* Map EMAC regs */
2609 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2610 printk(KERN_ERR "%s: Can't get registers address\n",
2611 np->full_name);
2612 goto err_irq_unmap;
2613 }
2614 // TODO : request_mem_region
2615 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2616 if (dev->emacp == NULL) {
2617 printk(KERN_ERR "%s: Can't map device registers!\n",
2618 np->full_name);
2619 err = -ENOMEM;
2620 goto err_irq_unmap;
2621 }
2622
2623 /* Wait for dependent devices */
2624 err = emac_wait_deps(dev);
2625 if (err) {
2626 printk(KERN_ERR
2627 "%s: Timeout waiting for dependent devices\n",
2628 np->full_name);
2629 /* display more info about what's missing ? */
2630 goto err_reg_unmap;
2631 }
2632 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2633 if (dev->mdio_dev != NULL)
2634 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2635
2636 /* Register with MAL */
2637 dev->commac.ops = &emac_commac_ops;
2638 dev->commac.dev = dev;
2639 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2640 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2641 err = mal_register_commac(dev->mal, &dev->commac);
2642 if (err) {
2643 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2644 np->full_name, dev->mal_dev->node->full_name);
2645 goto err_rel_deps;
2646 }
2647 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2648 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2649
2650 /* Get pointers to BD rings */
2651 dev->tx_desc =
2652 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2653 dev->rx_desc =
2654 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2655
2656 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2657 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2658
2659 /* Clean rings */
2660 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2661 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2662
2663 /* Attach to ZMII, if needed */
2664 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2665 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2666 goto err_unreg_commac;
2667
2668 /* Attach to RGMII, if needed */
2669 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2670 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2671 goto err_detach_zmii;
2672
2673 /* Attach to TAH, if needed */
2674 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2675 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2676 goto err_detach_rgmii;
2677
2678 /* Set some link defaults before we can find out real parameters */
2679 dev->phy.speed = SPEED_100;
2680 dev->phy.duplex = DUPLEX_FULL;
2681 dev->phy.autoneg = AUTONEG_DISABLE;
2682 dev->phy.pause = dev->phy.asym_pause = 0;
2683 dev->stop_timeout = STOP_TIMEOUT_100;
2684 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2685
2686 /* Find PHY if any */
2687 err = emac_init_phy(dev);
2688 if (err != 0)
2689 goto err_detach_tah;
2690
2691 /* Fill in the driver function table */
2692 ndev->open = &emac_open;
ee63d22b 2693 if (dev->tah_dev)
1d3bb996 2694 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1d3bb996
DG
2695 ndev->tx_timeout = &emac_tx_timeout;
2696 ndev->watchdog_timeo = 5 * HZ;
2697 ndev->stop = &emac_close;
2698 ndev->get_stats = &emac_stats;
2699 ndev->set_multicast_list = &emac_set_multicast_list;
2700 ndev->do_ioctl = &emac_ioctl;
2701 if (emac_phy_supports_gige(dev->phy_mode)) {
ee63d22b 2702 ndev->hard_start_xmit = &emac_start_xmit_sg;
1d3bb996
DG
2703 ndev->change_mtu = &emac_change_mtu;
2704 dev->commac.ops = &emac_commac_sg_ops;
ee63d22b
SR
2705 } else {
2706 ndev->hard_start_xmit = &emac_start_xmit;
1d3bb996
DG
2707 }
2708 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2709
2710 netif_carrier_off(ndev);
2711 netif_stop_queue(ndev);
2712
2713 err = register_netdev(ndev);
2714 if (err) {
2715 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2716 np->full_name, err);
2717 goto err_detach_tah;
2718 }
2719
2720 /* Set our drvdata last as we don't want them visible until we are
2721 * fully initialized
2722 */
2723 wmb();
2724 dev_set_drvdata(&ofdev->dev, dev);
2725
2726 /* There's a new kid in town ! Let's tell everybody */
2727 wake_up_all(&emac_probe_wait);
2728
2729
2730 printk(KERN_INFO
2731 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2732 ndev->name, dev->cell_index, np->full_name,
2733 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2734 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2735
2736 if (dev->phy.address >= 0)
2737 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2738 dev->phy.def->name, dev->phy.address);
2739
2740 emac_dbg_register(dev);
2741
2742 /* Life is good */
2743 return 0;
2744
2745 /* I have a bad feeling about this ... */
2746
2747 err_detach_tah:
2748 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2749 tah_detach(dev->tah_dev, dev->tah_port);
2750 err_detach_rgmii:
2751 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2752 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2753 err_detach_zmii:
2754 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2755 zmii_detach(dev->zmii_dev, dev->zmii_port);
2756 err_unreg_commac:
2757 mal_unregister_commac(dev->mal, &dev->commac);
2758 err_rel_deps:
2759 emac_put_deps(dev);
2760 err_reg_unmap:
2761 iounmap(dev->emacp);
2762 err_irq_unmap:
2763 if (dev->wol_irq != NO_IRQ)
2764 irq_dispose_mapping(dev->wol_irq);
2765 if (dev->emac_irq != NO_IRQ)
2766 irq_dispose_mapping(dev->emac_irq);
2767 err_free:
2768 kfree(ndev);
2769 err_gone:
2770 /* if we were on the bootlist, remove us as we won't show up and
2771 * wake up all waiters to notify them in case they were waiting
2772 * on us
2773 */
2774 if (blist) {
2775 *blist = NULL;
2776 wake_up_all(&emac_probe_wait);
2777 }
2778 return err;
2779}
2780
2781static int __devexit emac_remove(struct of_device *ofdev)
2782{
2783 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2784
2785 DBG(dev, "remove" NL);
2786
2787 dev_set_drvdata(&ofdev->dev, NULL);
2788
2789 unregister_netdev(dev->ndev);
2790
61dbcece
BH
2791 flush_scheduled_work();
2792
1d3bb996
DG
2793 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2794 tah_detach(dev->tah_dev, dev->tah_port);
2795 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2796 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2797 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2798 zmii_detach(dev->zmii_dev, dev->zmii_port);
2799
2800 mal_unregister_commac(dev->mal, &dev->commac);
2801 emac_put_deps(dev);
2802
2803 emac_dbg_unregister(dev);
2804 iounmap(dev->emacp);
2805
2806 if (dev->wol_irq != NO_IRQ)
2807 irq_dispose_mapping(dev->wol_irq);
2808 if (dev->emac_irq != NO_IRQ)
2809 irq_dispose_mapping(dev->emac_irq);
2810
2811 kfree(dev->ndev);
2812
2813 return 0;
2814}
2815
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: both classic "ibm,emac" and the EMAC4
 * variant are handled by this driver (version detected again in
 * emac_init_config()). */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},
};
2829
/* OF platform driver glue; registered from emac_init() */
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
2837
/*
 * Scan the whole device tree for EMAC nodes (skipping ones marked
 * "unused" or without a cell-index) and fill emac_boot_list with them,
 * sorted by ascending cell-index. A node reference is taken for each
 * entry; emac_exit() puts them back.
 */
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			/* list full: drop the iterator's reference and stop */
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
2877
/*
 * Module init: set up debug support, build the EMAC boot list, then
 * initialize the submodules (MAL, ZMII, RGMII, TAH) before registering
 * the platform driver. Unwinds in reverse order on failure.
 */
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}
2920
/*
 * Module exit: unregister the driver, tear down the submodules in
 * reverse init order, and drop the node references held by the boot
 * list (taken in emac_make_bootlist()).
 */
static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}
2938
/* Standard module entry/exit hooks */
module_init(emac_init);
module_exit(emac_exit);