1/*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12 * Armin Kuster <akuster@mvista.com>
13 * Johnnie Peters <jpeters@mvista.com>
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 */
21
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/delay.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/crc32.h>
31#include <linux/ethtool.h>
32#include <linux/mii.h>
33#include <linux/bitops.h>
34#include <linux/workqueue.h>
35
36#include <asm/processor.h>
37#include <asm/io.h>
38#include <asm/dma.h>
39#include <asm/uaccess.h>
40
41#include "core.h"
42
43/*
44 * Lack of dma_unmap_???? calls is intentional.
45 *
46 * API-correct usage requires additional support state information to be
47 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
48 * EMAC design (e.g. TX buffer passed from network stack can be split into
49 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
50 * maintaining such information will add additional overhead.
51 * Current DMA API implementation for 4xx processors only ensures cache coherency
52 * and dma_unmap_???? routines are empty and are likely to stay this way.
53 * I decided to omit dma_unmap_??? calls because I don't want to add additional
54 * complexity just for the sake of following some abstract API, when it doesn't
55 * add any real benefit to the driver. I understand that this decision may be
56 * controversial, but I really tried to make the code API-correct and efficient
57 * at the same time and didn't come up with code I liked :(. --ebs
58 */
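/* For illustration only (not part of the driver; struct and field names
 * below are made up): "API-correct" unmapping would mean remembering, for
 * every BD, how it was mapped, e.g.
 *
 *	struct emac_bd_dma_state {
 *		dma_addr_t	addr;		(handle from dma_map_single/page)
 *		u16		len;		(length for the dma_unmap call)
 *		u8		mapped_as_page;	(which unmap routine to use)
 *	};
 *
 * kept as an extra NUM_TX_BUFF/NUM_RX_BUFF-sized array in sync with the
 * rings -- exactly the per-BD bookkeeping overhead argued against above.
 */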
59
60#define DRV_NAME "emac"
61#define DRV_VERSION "3.54"
62#define DRV_DESC "PPC 4xx OCP EMAC driver"
63
64MODULE_DESCRIPTION(DRV_DESC);
65MODULE_AUTHOR
66 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
67MODULE_LICENSE("GPL");
68
69/*
70 * PPC64 doesn't (yet) have a cacheable_memcpy
71 */
72#ifdef CONFIG_PPC64
73#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
74#endif
75
76/* minimum number of free TX descriptors required to wake up TX process */
77#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
78
79/* If packet size is less than this number, we allocate a small skb and copy
80 * the packet contents into it instead of just sending the original big skb up
81 */
82#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
83
84/* Since multiple EMACs share MDIO lines in various ways, we need
85 * to avoid re-using the same PHY ID in cases where the arch didn't
86 * setup precise phy_map entries
87 *
88 * XXX This is something that needs to be reworked as we can have multiple
89 * EMAC "sets" (multiple ASICs containing several EMACs), though in that
90 * case we can probably require explicit PHY IDs in the device-tree
91 */
92static u32 busy_phy_map;
93static DEFINE_MUTEX(emac_phy_map_lock);
94
95/* This is the wait queue used to wait on any event related to probe, that
96 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
97 */
98static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
99
100/* Having stable interface names is a doomed idea. However, it would be nice
101 * if we didn't have completely random interface names at boot too :-) It's
102 * just a matter of making everybody's life easier. Since we are doing
103 * threaded probing, it's a bit harder though. The base idea here is that
104 * we make up a list of all emacs in the device-tree before we register the
105 * driver. Every emac will then wait for the previous one in the list to
106 * initialize before itself. We should also keep that list ordered by
107 * cell_index.
108 * That list is only 4 entries long, meaning that additional EMACs don't
109 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
110 */
111
112#define EMAC_BOOT_LIST_SIZE 4
113static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
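/* For illustration (the real probe-ordering code lives further down in
 * this file; emac_dev_ready() and "me" are placeholders): each EMAC scans
 * the list and sleeps on emac_probe_wait until all earlier entries have
 * finished, roughly
 *
 *	for (i = 0; i < EMAC_BOOT_LIST_SIZE && emac_boot_list[i] != me; ++i)
 *		wait_event_timeout(emac_probe_wait,
 *				   emac_dev_ready(emac_boot_list[i]),
 *				   EMAC_PROBE_DEP_TIMEOUT);
 */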
114
115/* How long should I wait for dependent devices? */
116#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
117
118/* I don't want to litter the system log with timeout errors
119 * when we have a brain-damaged PHY.
120 */
121static inline void emac_report_timeout_error(struct emac_instance *dev,
122 const char *error)
123{
124 if (net_ratelimit())
125 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
126}
127
128/* PHY polling intervals */
129#define PHY_POLL_LINK_ON HZ
130#define PHY_POLL_LINK_OFF (HZ / 5)
131
132/* Graceful stop timeouts in us.
133 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
134 */
135#define STOP_TIMEOUT_10 1230
136#define STOP_TIMEOUT_100 124
137#define STOP_TIMEOUT_1000 13
138#define STOP_TIMEOUT_1000_JUMBO 73
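/* Rough arithmetic behind the values above: one maximum-length frame is
 * 1518 bytes + 8 bytes preamble + 12 bytes IFG = 1538 byte times, i.e.
 * 1538 * 8 / 10 ~= 1230 us at 10 Mb/s, ~124 us at 100 Mb/s and ~13 us at
 * 1000 Mb/s; the jumbo value corresponds to a ~9000-byte frame at
 * 1000 Mb/s (~73 us).
 */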
139
140/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
141static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
142 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
143 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
144 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
145 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
146 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
147 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
148 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
149 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
150 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
151 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
152 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
153 "tx_bd_excessive_collisions", "tx_bd_late_collision",
154 "tx_bd_multple_collisions", "tx_bd_single_collision",
155 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
156 "tx_errors"
157};
158
159static irqreturn_t emac_irq(int irq, void *dev_instance);
160static void emac_clean_tx_ring(struct emac_instance *dev);
161static void __emac_set_multicast_list(struct emac_instance *dev);
162
163static inline int emac_phy_supports_gige(int phy_mode)
164{
165 return phy_mode == PHY_MODE_GMII ||
166 phy_mode == PHY_MODE_RGMII ||
167 phy_mode == PHY_MODE_TBI ||
168 phy_mode == PHY_MODE_RTBI;
169}
170
171static inline int emac_phy_gpcs(int phy_mode)
172{
173 return phy_mode == PHY_MODE_TBI ||
174 phy_mode == PHY_MODE_RTBI;
175}
176
177static inline void emac_tx_enable(struct emac_instance *dev)
178{
179 struct emac_regs __iomem *p = dev->emacp;
180 u32 r;
181
182 DBG(dev, "tx_enable" NL);
183
184 r = in_be32(&p->mr0);
185 if (!(r & EMAC_MR0_TXE))
186 out_be32(&p->mr0, r | EMAC_MR0_TXE);
187}
188
189static void emac_tx_disable(struct emac_instance *dev)
190{
191 struct emac_regs __iomem *p = dev->emacp;
192 u32 r;
193
194 DBG(dev, "tx_disable" NL);
195
196 r = in_be32(&p->mr0);
197 if (r & EMAC_MR0_TXE) {
198 int n = dev->stop_timeout;
199 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
200 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
201 udelay(1);
202 --n;
203 }
204 if (unlikely(!n))
205 emac_report_timeout_error(dev, "TX disable timeout");
206 }
207}
208
209static void emac_rx_enable(struct emac_instance *dev)
210{
211 struct emac_regs __iomem *p = dev->emacp;
212 u32 r;
213
214 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
215 goto out;
216
217 DBG(dev, "rx_enable" NL);
218
219 r = in_be32(&p->mr0);
220 if (!(r & EMAC_MR0_RXE)) {
221 if (unlikely(!(r & EMAC_MR0_RXI))) {
222 /* Wait if previous async disable is still in progress */
223 int n = dev->stop_timeout;
224 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
225 udelay(1);
226 --n;
227 }
228 if (unlikely(!n))
229 emac_report_timeout_error(dev,
230 "RX disable timeout");
231 }
232 out_be32(&p->mr0, r | EMAC_MR0_RXE);
233 }
234 out:
235 ;
236}
237
238static void emac_rx_disable(struct emac_instance *dev)
239{
240 struct emac_regs __iomem *p = dev->emacp;
241 u32 r;
242
243 DBG(dev, "rx_disable" NL);
244
245 r = in_be32(&p->mr0);
246 if (r & EMAC_MR0_RXE) {
247 int n = dev->stop_timeout;
248 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
249 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
250 udelay(1);
251 --n;
252 }
253 if (unlikely(!n))
254 emac_report_timeout_error(dev, "RX disable timeout");
255 }
256}
257
258static inline void emac_netif_stop(struct emac_instance *dev)
259{
260 netif_tx_lock_bh(dev->ndev);
261 dev->no_mcast = 1;
262 netif_tx_unlock_bh(dev->ndev);
263 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
264 mal_poll_disable(dev->mal, &dev->commac);
265 netif_tx_disable(dev->ndev);
266}
267
268static inline void emac_netif_start(struct emac_instance *dev)
269{
270 netif_tx_lock_bh(dev->ndev);
271 dev->no_mcast = 0;
272 if (dev->mcast_pending && netif_running(dev->ndev))
273 __emac_set_multicast_list(dev);
274 netif_tx_unlock_bh(dev->ndev);
275
276 netif_wake_queue(dev->ndev);
277
278 /* NOTE: unconditional netif_wake_queue is only appropriate
279 * so long as all callers are assured to have free tx slots
280 * (taken from tg3... though the case where that is wrong is
281 * not terribly harmful)
282 */
283 mal_poll_enable(dev->mal, &dev->commac);
284}
285
286static inline void emac_rx_disable_async(struct emac_instance *dev)
287{
288 struct emac_regs __iomem *p = dev->emacp;
289 u32 r;
290
291 DBG(dev, "rx_disable_async" NL);
292
293 r = in_be32(&p->mr0);
294 if (r & EMAC_MR0_RXE)
295 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
296}
297
298static int emac_reset(struct emac_instance *dev)
299{
300 struct emac_regs __iomem *p = dev->emacp;
301 int n = 20;
302
303 DBG(dev, "reset" NL);
304
305 if (!dev->reset_failed) {
306 /* A 40x erratum suggests stopping the RX channel before reset,
307 * we stop TX as well
308 */
309 emac_rx_disable(dev);
310 emac_tx_disable(dev);
311 }
312
313 out_be32(&p->mr0, EMAC_MR0_SRST);
314 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
315 --n;
316
317 if (n) {
318 dev->reset_failed = 0;
319 return 0;
320 } else {
321 emac_report_timeout_error(dev, "reset timeout");
322 dev->reset_failed = 1;
323 return -ETIMEDOUT;
324 }
325}
326
327static void emac_hash_mc(struct emac_instance *dev)
328{
329 struct emac_regs __iomem *p = dev->emacp;
330 u16 gaht[4] = { 0 };
331 struct dev_mc_list *dmi;
332
333 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
334
335 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
336 int bit;
337 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
338 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
339 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
340
341 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
342 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
343 }
344 out_be32(&p->gaht1, gaht[0]);
345 out_be32(&p->gaht2, gaht[1]);
346 out_be32(&p->gaht3, gaht[2]);
347 out_be32(&p->gaht4, gaht[3]);
348}
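/* Worked example for the hash above (address chosen arbitrarily): if
 * ether_crc(ETH_ALEN, addr) >> 26 == 5, then bit = 63 - 5 = 58, so
 * gaht[58 >> 4] = gaht[3] is or-ed with 0x8000 >> (58 & 0xf) = 0x0020,
 * i.e. the address lands in GAHT4.
 */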
349
350static inline u32 emac_iff2rmr(struct net_device *ndev)
351{
352 struct emac_instance *dev = netdev_priv(ndev);
353 u32 r;
354
355 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
356
357 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
358 r |= EMAC4_RMR_BASE;
359 else
360 r |= EMAC_RMR_BASE;
361
362 if (ndev->flags & IFF_PROMISC)
363 r |= EMAC_RMR_PME;
364 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
365 r |= EMAC_RMR_PMME;
366 else if (ndev->mc_count > 0)
367 r |= EMAC_RMR_MAE;
368
369 return r;
370}
371
372static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
373{
374 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
375
376 DBG2(dev, "__emac_calc_base_mr1" NL);
377
378 switch(tx_size) {
379 case 2048:
380 ret |= EMAC_MR1_TFS_2K;
381 break;
382 default:
383 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
384 dev->ndev->name, tx_size);
385 }
386
387 switch(rx_size) {
388 case 16384:
389 ret |= EMAC_MR1_RFS_16K;
390 break;
391 case 4096:
392 ret |= EMAC_MR1_RFS_4K;
393 break;
394 default:
395 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
396 dev->ndev->name, rx_size);
397 }
398
399 return ret;
400}
401
402static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
403{
404 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
405 EMAC4_MR1_OBCI(dev->opb_bus_freq);
406
407 DBG2(dev, "__emac4_calc_base_mr1" NL);
408
409 switch(tx_size) {
410 case 4096:
411 ret |= EMAC4_MR1_TFS_4K;
412 break;
413 case 2048:
414 ret |= EMAC4_MR1_TFS_2K;
415 break;
416 default:
417 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
418 dev->ndev->name, tx_size);
419 }
420
421 switch(rx_size) {
422 case 16384:
423 ret |= EMAC4_MR1_RFS_16K;
424 break;
425 case 4096:
426 ret |= EMAC4_MR1_RFS_4K;
427 break;
428 case 2048:
429 ret |= EMAC4_MR1_RFS_2K;
430 break;
431 default:
432 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
433 dev->ndev->name, rx_size);
434 }
435
436 return ret;
437}
438
439static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
440{
441 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
442 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
443 __emac_calc_base_mr1(dev, tx_size, rx_size);
444}
445
446static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
447{
448 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
449 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
450 else
451 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
452}
453
454static inline u32 emac_calc_rwmr(struct emac_instance *dev,
455 unsigned int low, unsigned int high)
456{
457 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
458 return (low << 22) | ( (high & 0x3ff) << 6);
459 else
460 return (low << 23) | ( (high & 0x1ff) << 7);
461}
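/* Note on the packing above: both watermarks are expressed in FIFO
 * entries and packed into a single register value, e.g. on EMAC4
 * emac_calc_rwmr(dev, 8, 32) yields (8 << 22) | (32 << 6); the
 * non-EMAC4 layout is the same idea shifted up by one bit.
 */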
462
463static int emac_configure(struct emac_instance *dev)
464{
465 struct emac_regs __iomem *p = dev->emacp;
466 struct net_device *ndev = dev->ndev;
467 int tx_size, rx_size;
468 u32 r, mr1 = 0;
469
470 DBG(dev, "configure" NL);
471
472 if (emac_reset(dev) < 0)
473 return -ETIMEDOUT;
474
475 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
476 tah_reset(dev->tah_dev);
477
478 DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n",
479 dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
480
481 /* Default fifo sizes */
482 tx_size = dev->tx_fifo_size;
483 rx_size = dev->rx_fifo_size;
484
485 /* Check for full duplex */
486 if (dev->phy.duplex == DUPLEX_FULL)
487 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
488
489 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
490 dev->stop_timeout = STOP_TIMEOUT_10;
491 switch (dev->phy.speed) {
492 case SPEED_1000:
493 if (emac_phy_gpcs(dev->phy.mode)) {
494 mr1 |= EMAC_MR1_MF_1000GPCS |
495 EMAC_MR1_MF_IPPA(dev->phy.address);
496
497 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
498 * identify this GPCS PHY later.
499 */
500 out_be32(&p->ipcr, 0xdeadbeef);
501 } else
502 mr1 |= EMAC_MR1_MF_1000;
503
504 /* Extended fifo sizes */
505 tx_size = dev->tx_fifo_size_gige;
506 rx_size = dev->rx_fifo_size_gige;
507
508 if (dev->ndev->mtu > ETH_DATA_LEN) {
509 mr1 |= EMAC_MR1_JPSM;
510 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
511 } else
512 dev->stop_timeout = STOP_TIMEOUT_1000;
513 break;
514 case SPEED_100:
515 mr1 |= EMAC_MR1_MF_100;
516 dev->stop_timeout = STOP_TIMEOUT_100;
517 break;
518 default: /* make gcc happy */
519 break;
520 }
521
522 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
523 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
524 dev->phy.speed);
525 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
526 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
527
528 /* An erratum on 40x forces us to NOT use integrated flow control,
529 * let's hope it works on 44x ;)
530 */
531 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
532 dev->phy.duplex == DUPLEX_FULL) {
533 if (dev->phy.pause)
534 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
535 else if (dev->phy.asym_pause)
536 mr1 |= EMAC_MR1_APP;
537 }
538
539 /* Add base settings & fifo sizes & program MR1 */
540 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
541 out_be32(&p->mr1, mr1);
542
543 /* Set individual MAC address */
544 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
545 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
546 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
547 ndev->dev_addr[5]);
548
549 /* VLAN Tag Protocol ID */
550 out_be32(&p->vtpid, 0x8100);
551
552 /* Receive mode register */
553 r = emac_iff2rmr(ndev);
554 if (r & EMAC_RMR_MAE)
555 emac_hash_mc(dev);
556 out_be32(&p->rmr, r);
557
558 /* FIFOs thresholds */
559 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
560 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
561 tx_size / 2 / dev->fifo_entry_size);
562 else
563 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
564 tx_size / 2 / dev->fifo_entry_size);
565 out_be32(&p->tmr1, r);
566 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
567
568 /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
569 there should still be enough space in the FIFO to allow our link
570 partner time to process this frame and also time to send a PAUSE
571 frame itself.
572
573 Here is the worst case scenario for the RX FIFO "headroom"
574 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
575
576 1) One maximum-length frame on TX 1522 bytes
577 2) One PAUSE frame time 64 bytes
578 3) PAUSE frame decode time allowance 64 bytes
579 4) One maximum-length frame on RX 1522 bytes
580 5) Round-trip propagation delay of the link (100Mb) 15 bytes
581 ----------
582 3187 bytes
583
584 I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
585 and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
586 */
587 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
588 rx_size / 4 / dev->fifo_entry_size);
589 out_be32(&p->rwmr, r);
590
591 /* Set PAUSE timer to the maximum */
592 out_be32(&p->ptr, 0xffff);
593
594 /* IRQ sources */
595 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
596 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
597 EMAC_ISR_IRE | EMAC_ISR_TE;
598 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
599 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
600 EMAC4_ISR_RXOE | */;
601 out_be32(&p->iser, r);
602
603 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
604 if (emac_phy_gpcs(dev->phy.mode))
605 emac_mii_reset_phy(&dev->phy);
606
607 return 0;
608}
609
610static void emac_reinitialize(struct emac_instance *dev)
611{
612 DBG(dev, "reinitialize" NL);
613
614 emac_netif_stop(dev);
615 if (!emac_configure(dev)) {
616 emac_tx_enable(dev);
617 emac_rx_enable(dev);
618 }
619 emac_netif_start(dev);
620}
621
622static void emac_full_tx_reset(struct emac_instance *dev)
623{
624 DBG(dev, "full_tx_reset" NL);
625
626 emac_tx_disable(dev);
627 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
628 emac_clean_tx_ring(dev);
629 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
630
631 emac_configure(dev);
632
633 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
634 emac_tx_enable(dev);
635 emac_rx_enable(dev);
636}
637
638static void emac_reset_work(struct work_struct *work)
639{
640 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
641
642 DBG(dev, "reset_work" NL);
643
644 mutex_lock(&dev->link_lock);
645 emac_netif_stop(dev);
646 emac_full_tx_reset(dev);
647 emac_netif_start(dev);
648 mutex_unlock(&dev->link_lock);
649}
650
651static void emac_tx_timeout(struct net_device *ndev)
652{
653 struct emac_instance *dev = netdev_priv(ndev);
654
655 DBG(dev, "tx_timeout" NL);
656
657 schedule_work(&dev->reset_work);
658}
659
660
661static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
662{
663 int done = !!(stacr & EMAC_STACR_OC);
664
665 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
666 done = !done;
667
668 return done;
669};
670
671static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
672{
673 struct emac_regs __iomem *p = dev->emacp;
674 u32 r = 0;
675 int n, err = -ETIMEDOUT;
676
677 mutex_lock(&dev->mdio_lock);
678
679 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
680
681 /* Enable proper MDIO port */
682 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
683 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
684 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
685 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
686
687 /* Wait for management interface to become idle */
688 n = 10;
689 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
690 udelay(1);
691 if (!--n) {
692 DBG2(dev, " -> timeout wait idle\n");
693 goto bail;
694 }
695 }
696
697 /* Issue read command */
698 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
699 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
700 else
701 r = EMAC_STACR_BASE(dev->opb_bus_freq);
702 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
703 r |= EMAC_STACR_OC;
704 if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
705 r |= EMACX_STACR_STAC_READ;
706 else
707 r |= EMAC_STACR_STAC_READ;
708 r |= (reg & EMAC_STACR_PRA_MASK)
709 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
710 out_be32(&p->stacr, r);
711
712 /* Wait for read to complete */
713 n = 100;
714 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
715 udelay(1);
716 if (!--n) {
717 DBG2(dev, " -> timeout wait complete\n");
718 goto bail;
719 }
720 }
721
722 if (unlikely(r & EMAC_STACR_PHYE)) {
723 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
724 err = -EREMOTEIO;
725 goto bail;
726 }
727
728 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
729
730 DBG2(dev, "mdio_read -> %04x" NL, r);
731 err = 0;
732 bail:
733 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
734 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
735 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
736 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
737 mutex_unlock(&dev->mdio_lock);
738
739 return err == 0 ? r : err;
740}
741
742static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
743 u16 val)
744{
745 struct emac_regs __iomem *p = dev->emacp;
746 u32 r = 0;
747 int n, err = -ETIMEDOUT;
748
749 mutex_lock(&dev->mdio_lock);
750
751 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
752
753 /* Enable proper MDIO port */
754 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
755 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
756 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
757 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
758
759 /* Wait for management interface to be idle */
760 n = 10;
761 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
762 udelay(1);
763 if (!--n) {
764 DBG2(dev, " -> timeout wait idle\n");
765 goto bail;
766 }
767 }
768
769 /* Issue write command */
770 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
771 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
772 else
773 r = EMAC_STACR_BASE(dev->opb_bus_freq);
774 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
775 r |= EMAC_STACR_OC;
776 if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
777 r |= EMACX_STACR_STAC_WRITE;
778 else
779 r |= EMAC_STACR_STAC_WRITE;
780 r |= (reg & EMAC_STACR_PRA_MASK) |
781 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
782 (val << EMAC_STACR_PHYD_SHIFT);
783 out_be32(&p->stacr, r);
784
785 /* Wait for write to complete */
786 n = 100;
787 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
788 udelay(1);
789 if (!--n) {
790 DBG2(dev, " -> timeout wait complete\n");
791 goto bail;
792 }
793 }
794 err = 0;
795 bail:
796 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
797 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
798 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
799 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
800 mutex_unlock(&dev->mdio_lock);
801}
802
803static int emac_mdio_read(struct net_device *ndev, int id, int reg)
804{
805 struct emac_instance *dev = netdev_priv(ndev);
806 int res;
807
808 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
809 (u8) id, (u8) reg);
810 return res;
811}
812
813static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
814{
815 struct emac_instance *dev = netdev_priv(ndev);
816
817 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
818 (u8) id, (u8) reg, (u16) val);
819}
820
821/* Tx lock BH */
822static void __emac_set_multicast_list(struct emac_instance *dev)
823{
824 struct emac_regs __iomem *p = dev->emacp;
825 u32 rmr = emac_iff2rmr(dev->ndev);
826
827 DBG(dev, "__multicast %08x" NL, rmr);
828
829 /* I decided to relax register access rules here to avoid
830 * a full EMAC reset.
831 *
832 * There is a real problem with the EMAC4 core if we use the MWSW_001 bit
833 * in the MR1 register and do a full EMAC reset.
834 * One TX BD status update is delayed and, after EMAC reset, it
835 * never happens, resulting in a TX hang (it'll be recovered by the TX
836 * timeout handler eventually, but this is just gross).
837 * So we either have to do a full TX reset or try to cheat here :)
838 *
839 * The only required change is to the RX mode register, so I *think* all
840 * we need is just to stop the RX channel. This seems to work on all
841 * tested SoCs. --ebs
842 *
843 * If we need the full reset, we might just trigger the workqueue
844 * and do it async... a bit nasty but should work --BenH
845 */
846 dev->mcast_pending = 0;
847 emac_rx_disable(dev);
848 if (rmr & EMAC_RMR_MAE)
849 emac_hash_mc(dev);
850 out_be32(&p->rmr, rmr);
851 emac_rx_enable(dev);
852}
853
854/* Tx lock BH */
855static void emac_set_multicast_list(struct net_device *ndev)
856{
857 struct emac_instance *dev = netdev_priv(ndev);
858
859 DBG(dev, "multicast" NL);
860
861 BUG_ON(!netif_running(dev->ndev));
862
863 if (dev->no_mcast) {
864 dev->mcast_pending = 1;
865 return;
866 }
867 __emac_set_multicast_list(dev);
868}
869
870static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
871{
872 int rx_sync_size = emac_rx_sync_size(new_mtu);
873 int rx_skb_size = emac_rx_skb_size(new_mtu);
874 int i, ret = 0;
875
876 mutex_lock(&dev->link_lock);
877 emac_netif_stop(dev);
878 emac_rx_disable(dev);
879 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
880
881 if (dev->rx_sg_skb) {
882 ++dev->estats.rx_dropped_resize;
883 dev_kfree_skb(dev->rx_sg_skb);
884 dev->rx_sg_skb = NULL;
885 }
886
887 /* Make a first pass over RX ring and mark BDs ready, dropping
888 * non-processed packets on the way. We need this as a separate pass
889 * to simplify error recovery in the case of allocation failure later.
890 */
891 for (i = 0; i < NUM_RX_BUFF; ++i) {
892 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
893 ++dev->estats.rx_dropped_resize;
894
895 dev->rx_desc[i].data_len = 0;
896 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
897 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
898 }
899
900 /* Reallocate RX ring only if bigger skb buffers are required */
901 if (rx_skb_size <= dev->rx_skb_size)
902 goto skip;
903
904 /* Second pass, allocate new skbs */
905 for (i = 0; i < NUM_RX_BUFF; ++i) {
906 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
907 if (!skb) {
908 ret = -ENOMEM;
909 goto oom;
910 }
911
912 BUG_ON(!dev->rx_skb[i]);
913 dev_kfree_skb(dev->rx_skb[i]);
914
915 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
916 dev->rx_desc[i].data_ptr =
917 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
918 DMA_FROM_DEVICE) + 2;
919 dev->rx_skb[i] = skb;
920 }
921 skip:
922 /* Check if we need to change "Jumbo" bit in MR1 */
923 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
924 /* This is to prevent starting RX channel in emac_rx_enable() */
925 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
926
927 dev->ndev->mtu = new_mtu;
928 emac_full_tx_reset(dev);
929 }
930
931 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
932 oom:
933 /* Restart RX */
934 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
935 dev->rx_slot = 0;
936 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
937 emac_rx_enable(dev);
938 emac_netif_start(dev);
939 mutex_unlock(&dev->link_lock);
940
941 return ret;
942}
943
944/* Process ctx, rtnl_lock semaphore */
945static int emac_change_mtu(struct net_device *ndev, int new_mtu)
946{
947 struct emac_instance *dev = netdev_priv(ndev);
948 int ret = 0;
949
950 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
951 return -EINVAL;
952
953 DBG(dev, "change_mtu(%d)" NL, new_mtu);
954
955 if (netif_running(ndev)) {
956 /* Check if we really need to reinitialize the RX ring */
957 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
958 ret = emac_resize_rx_ring(dev, new_mtu);
959 }
960
961 if (!ret) {
962 ndev->mtu = new_mtu;
963 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
964 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
965 }
966
967 return ret;
968}
969
970static void emac_clean_tx_ring(struct emac_instance *dev)
971{
972 int i;
973
974 for (i = 0; i < NUM_TX_BUFF; ++i) {
975 if (dev->tx_skb[i]) {
976 dev_kfree_skb(dev->tx_skb[i]);
977 dev->tx_skb[i] = NULL;
978 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
979 ++dev->estats.tx_dropped;
980 }
981 dev->tx_desc[i].ctrl = 0;
982 dev->tx_desc[i].data_ptr = 0;
983 }
984}
985
986static void emac_clean_rx_ring(struct emac_instance *dev)
987{
988 int i;
989
990 for (i = 0; i < NUM_RX_BUFF; ++i)
991 if (dev->rx_skb[i]) {
992 dev->rx_desc[i].ctrl = 0;
993 dev_kfree_skb(dev->rx_skb[i]);
994 dev->rx_skb[i] = NULL;
995 dev->rx_desc[i].data_ptr = 0;
996 }
997
998 if (dev->rx_sg_skb) {
999 dev_kfree_skb(dev->rx_sg_skb);
1000 dev->rx_sg_skb = NULL;
1001 }
1002}
1003
1004static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1005 gfp_t flags)
1006{
1007 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1008 if (unlikely(!skb))
1009 return -ENOMEM;
1010
1011 dev->rx_skb[slot] = skb;
1012 dev->rx_desc[slot].data_len = 0;
1013
1014 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1015 dev->rx_desc[slot].data_ptr =
1016 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1017 DMA_FROM_DEVICE) + 2;
1018 wmb();
1019 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1020 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1021
1022 return 0;
1023}
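/* Note (an assumption about the intent of the +2 above): reserving
 * EMAC_RX_SKB_HEADROOM + 2 and mapping "skb->data - 2 ... + 2" keeps the
 * IP header 4-byte aligned behind the 14-byte Ethernet header while the
 * cache-synced region itself stays suitably aligned for DMA.
 */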
1024
1025static void emac_print_link_status(struct emac_instance *dev)
1026{
1027 if (netif_carrier_ok(dev->ndev))
1028 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1029 dev->ndev->name, dev->phy.speed,
1030 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1031 dev->phy.pause ? ", pause enabled" :
1032 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1033 else
1034 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1035}
1036
1037/* Process ctx, rtnl_lock semaphore */
1038static int emac_open(struct net_device *ndev)
1039{
1040 struct emac_instance *dev = netdev_priv(ndev);
1041 int err, i;
1042
1043 DBG(dev, "open" NL);
1044
1045 /* Setup error IRQ handler */
1046 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1047 if (err) {
1048 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1049 ndev->name, dev->emac_irq);
1050 return err;
1051 }
1052
1053 /* Allocate RX ring */
1054 for (i = 0; i < NUM_RX_BUFF; ++i)
1055 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1056 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1057 ndev->name);
1058 goto oom;
1059 }
1060
1061 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1062 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1063 dev->rx_sg_skb = NULL;
1064
1065 mutex_lock(&dev->link_lock);
1066
1067 /* XXX Start PHY polling now. Shouldn't we do like sungem instead and
1068 * always poll the PHY even when the iface is down ? That would allow
1069 * things like laptop-net to work. --BenH
1070 */
1071 if (dev->phy.address >= 0) {
1072 int link_poll_interval;
1073 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1074 dev->phy.def->ops->read_link(&dev->phy);
1075 netif_carrier_on(dev->ndev);
1076 link_poll_interval = PHY_POLL_LINK_ON;
1077 } else {
1078 netif_carrier_off(dev->ndev);
1079 link_poll_interval = PHY_POLL_LINK_OFF;
1080 }
1081 dev->link_polling = 1;
1082 wmb();
1083 schedule_delayed_work(&dev->link_work, link_poll_interval);
1084 emac_print_link_status(dev);
1085 } else
1086 netif_carrier_on(dev->ndev);
1087
1088 emac_configure(dev);
1089 mal_poll_add(dev->mal, &dev->commac);
1090 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1091 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1092 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1093 emac_tx_enable(dev);
1094 emac_rx_enable(dev);
1095 emac_netif_start(dev);
1096
1097 mutex_unlock(&dev->link_lock);
1098
1099 return 0;
1100 oom:
1101 emac_clean_rx_ring(dev);
1102 free_irq(dev->emac_irq, dev);
1103
1104 return -ENOMEM;
1105}
1106
1107/* BHs disabled */
1108#if 0
1109static int emac_link_differs(struct emac_instance *dev)
1110{
1111 u32 r = in_be32(&dev->emacp->mr1);
1112
1113 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1114 int speed, pause, asym_pause;
1115
1116 if (r & EMAC_MR1_MF_1000)
1117 speed = SPEED_1000;
1118 else if (r & EMAC_MR1_MF_100)
1119 speed = SPEED_100;
1120 else
1121 speed = SPEED_10;
1122
1123 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1124 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1125 pause = 1;
1126 asym_pause = 0;
1127 break;
1128 case EMAC_MR1_APP:
1129 pause = 0;
1130 asym_pause = 1;
1131 break;
1132 default:
1133 pause = asym_pause = 0;
1134 }
1135 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1136 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1137}
1138#endif
1139
1140static void emac_link_timer(struct work_struct *work)
1141{
1142 struct emac_instance *dev =
1143 container_of((struct delayed_work *)work,
1144 struct emac_instance, link_work);
1145 int link_poll_interval;
1146
1147 mutex_lock(&dev->link_lock);
1148
1149 DBG2(dev, "link timer" NL);
1150
1151 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1152 if (!netif_carrier_ok(dev->ndev)) {
1153 /* Get new link parameters */
1154 dev->phy.def->ops->read_link(&dev->phy);
1155
1156 netif_carrier_on(dev->ndev);
1157 emac_netif_stop(dev);
1158 emac_full_tx_reset(dev);
1159 emac_netif_start(dev);
1160 emac_print_link_status(dev);
1161 }
1162 link_poll_interval = PHY_POLL_LINK_ON;
1163 } else {
1164 if (netif_carrier_ok(dev->ndev)) {
1165 emac_reinitialize(dev);
1166 netif_carrier_off(dev->ndev);
1167 netif_tx_disable(dev->ndev);
1168 emac_print_link_status(dev);
1169 }
1170 link_poll_interval = PHY_POLL_LINK_OFF;
1171 }
1172 schedule_delayed_work(&dev->link_work, link_poll_interval);
1173
1174 mutex_unlock(&dev->link_lock);
1175}
1176
1177static void emac_force_link_update(struct emac_instance *dev)
1178{
1179 netif_carrier_off(dev->ndev);
1180 if (dev->link_polling) {
1181 cancel_rearming_delayed_work(&dev->link_work);
1182 if (dev->link_polling)
1183 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1184 }
1185}
1186
1187/* Process ctx, rtnl_lock semaphore */
1188static int emac_close(struct net_device *ndev)
1189{
1190 struct emac_instance *dev = netdev_priv(ndev);
1191
1192 DBG(dev, "close" NL);
1193
1194 if (dev->phy.address >= 0)
1195 cancel_rearming_delayed_work(&dev->link_work);
1196
1197 emac_netif_stop(dev);
1198 flush_scheduled_work();
1199
1200 emac_rx_disable(dev);
1201 emac_tx_disable(dev);
1202 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1203 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1204 mal_poll_del(dev->mal, &dev->commac);
1205
1206 emac_clean_tx_ring(dev);
1207 emac_clean_rx_ring(dev);
1208
1209 free_irq(dev->emac_irq, dev);
1210
1211 return 0;
1212}
1213
1214static inline u16 emac_tx_csum(struct emac_instance *dev,
1215 struct sk_buff *skb)
1216{
1217 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1218 skb->ip_summed == CHECKSUM_PARTIAL) {
1219 ++dev->stats.tx_packets_csum;
1220 return EMAC_TX_CTRL_TAH_CSUM;
1221 }
1222 return 0;
1223}
1224
1225static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1226{
1227 struct emac_regs __iomem *p = dev->emacp;
1228 struct net_device *ndev = dev->ndev;
1229
1230 /* Send the packet out. If this test makes a significant perf
1231 * difference, then we can store the TMR0 value in "dev"
1232 * instead
1233 */
1234 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1235 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1236 else
1237 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1238
1239 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1240 netif_stop_queue(ndev);
1241 DBG2(dev, "stopped TX queue" NL);
1242 }
1243
1244 ndev->trans_start = jiffies;
1245 ++dev->stats.tx_packets;
1246 dev->stats.tx_bytes += len;
1247
1248 return 0;
1249}
1250
1251/* Tx lock BH */
1252static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1253{
1254 struct emac_instance *dev = netdev_priv(ndev);
1255 unsigned int len = skb->len;
1256 int slot;
1257
1258 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1259 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1260
1261 slot = dev->tx_slot++;
1262 if (dev->tx_slot == NUM_TX_BUFF) {
1263 dev->tx_slot = 0;
1264 ctrl |= MAL_TX_CTRL_WRAP;
1265 }
1266
1267 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1268
1269 dev->tx_skb[slot] = skb;
1270 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1271 skb->data, len,
1272 DMA_TO_DEVICE);
1273 dev->tx_desc[slot].data_len = (u16) len;
1274 wmb();
1275 dev->tx_desc[slot].ctrl = ctrl;
1276
1277 return emac_xmit_finish(dev, len);
1278}
1279
1280#ifdef CONFIG_IBM_NEW_EMAC_TAH
1281static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1282 u32 pd, int len, int last, u16 base_ctrl)
1283{
1284 while (1) {
1285 u16 ctrl = base_ctrl;
1286 int chunk = min(len, MAL_MAX_TX_SIZE);
1287 len -= chunk;
1288
1289 slot = (slot + 1) % NUM_TX_BUFF;
1290
1291 if (last && !len)
1292 ctrl |= MAL_TX_CTRL_LAST;
1293 if (slot == NUM_TX_BUFF - 1)
1294 ctrl |= MAL_TX_CTRL_WRAP;
1295
1296 dev->tx_skb[slot] = NULL;
1297 dev->tx_desc[slot].data_ptr = pd;
1298 dev->tx_desc[slot].data_len = (u16) chunk;
1299 dev->tx_desc[slot].ctrl = ctrl;
1300 ++dev->tx_cnt;
1301
1302 if (!len)
1303 break;
1304
1305 pd += chunk;
1306 }
1307 return slot;
1308}
1309
1310/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1311static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1312{
1313 struct emac_instance *dev = netdev_priv(ndev);
1314 int nr_frags = skb_shinfo(skb)->nr_frags;
1315 int len = skb->len, chunk;
1316 int slot, i;
1317 u16 ctrl;
1318 u32 pd;
1319
1320 /* This is the common "fast" path */
1321 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1322 return emac_start_xmit(skb, ndev);
1323
1324 len -= skb->data_len;
1325
1326 /* Note, this is only an *estimation*; we can still run out of empty
1327 * slots because of the additional fragmentation into
1328 * MAL_MAX_TX_SIZE-sized chunks
1329 */
1330 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1331 goto stop_queue;
1332
1333 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1334 emac_tx_csum(dev, skb);
1335 slot = dev->tx_slot;
1336
1337 /* skb data */
1338 dev->tx_skb[slot] = NULL;
1339 chunk = min(len, MAL_MAX_TX_SIZE);
1340 dev->tx_desc[slot].data_ptr = pd =
1341 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1342 dev->tx_desc[slot].data_len = (u16) chunk;
1343 len -= chunk;
1344 if (unlikely(len))
1345 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1346 ctrl);
1347 /* skb fragments */
1348 for (i = 0; i < nr_frags; ++i) {
1349 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1350 len = frag->size;
1351
1352 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1353 goto undo_frame;
1354
1355 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1356 DMA_TO_DEVICE);
1357
1358 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1359 ctrl);
1360 }
1361
1362 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1363
1364 /* Attach skb to the last slot so we don't release it too early */
1365 dev->tx_skb[slot] = skb;
1366
1367 /* Send the packet out */
1368 if (dev->tx_slot == NUM_TX_BUFF - 1)
1369 ctrl |= MAL_TX_CTRL_WRAP;
1370 wmb();
1371 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1372 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1373
1374 return emac_xmit_finish(dev, skb->len);
1375
1376 undo_frame:
1377 /* Well, too bad. Our previous estimation was overly optimistic.
1378 * Undo everything.
1379 */
1380 while (slot != dev->tx_slot) {
1381 dev->tx_desc[slot].ctrl = 0;
1382 --dev->tx_cnt;
1383 if (--slot < 0)
1384 slot = NUM_TX_BUFF - 1;
1385 }
1386 ++dev->estats.tx_undo;
1387
1388 stop_queue:
1389 netif_stop_queue(ndev);
1390 DBG2(dev, "stopped TX queue" NL);
1391 return 1;
1392}
1393#else
1394# define emac_start_xmit_sg emac_start_xmit
1395#endif /* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
1396
1397/* Tx lock BHs */
1398static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1399{
1400 struct emac_error_stats *st = &dev->estats;
1401
1402 DBG(dev, "BD TX error %04x" NL, ctrl);
1403
1404 ++st->tx_bd_errors;
1405 if (ctrl & EMAC_TX_ST_BFCS)
1406 ++st->tx_bd_bad_fcs;
1407 if (ctrl & EMAC_TX_ST_LCS)
1408 ++st->tx_bd_carrier_loss;
1409 if (ctrl & EMAC_TX_ST_ED)
1410 ++st->tx_bd_excessive_deferral;
1411 if (ctrl & EMAC_TX_ST_EC)
1412 ++st->tx_bd_excessive_collisions;
1413 if (ctrl & EMAC_TX_ST_LC)
1414 ++st->tx_bd_late_collision;
1415 if (ctrl & EMAC_TX_ST_MC)
1416 ++st->tx_bd_multple_collisions;
1417 if (ctrl & EMAC_TX_ST_SC)
1418 ++st->tx_bd_single_collision;
1419 if (ctrl & EMAC_TX_ST_UR)
1420 ++st->tx_bd_underrun;
1421 if (ctrl & EMAC_TX_ST_SQE)
1422 ++st->tx_bd_sqe;
1423}
1424
1425static void emac_poll_tx(void *param)
1426{
1427 struct emac_instance *dev = param;
1428 u32 bad_mask;
1429
1430 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1431
1432 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1433 bad_mask = EMAC_IS_BAD_TX_TAH;
1434 else
1435 bad_mask = EMAC_IS_BAD_TX;
1436
1437 netif_tx_lock_bh(dev->ndev);
1438 if (dev->tx_cnt) {
1439 u16 ctrl;
1440 int slot = dev->ack_slot, n = 0;
1441 again:
1442 ctrl = dev->tx_desc[slot].ctrl;
1443 if (!(ctrl & MAL_TX_CTRL_READY)) {
1444 struct sk_buff *skb = dev->tx_skb[slot];
1445 ++n;
1446
1447 if (skb) {
1448 dev_kfree_skb(skb);
1449 dev->tx_skb[slot] = NULL;
1450 }
1451 slot = (slot + 1) % NUM_TX_BUFF;
1452
1453 if (unlikely(ctrl & bad_mask))
1454 emac_parse_tx_error(dev, ctrl);
1455
1456 if (--dev->tx_cnt)
1457 goto again;
1458 }
1459 if (n) {
1460 dev->ack_slot = slot;
1461 if (netif_queue_stopped(dev->ndev) &&
1462 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1463 netif_wake_queue(dev->ndev);
1464
1465 DBG2(dev, "tx %d pkts" NL, n);
1466 }
1467 }
1468 netif_tx_unlock_bh(dev->ndev);
1469}
1470
1471static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1472 int len)
1473{
1474 struct sk_buff *skb = dev->rx_skb[slot];
1475
1476 DBG2(dev, "recycle %d %d" NL, slot, len);
1477
1478 if (len)
1479 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1480 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1481
1482 dev->rx_desc[slot].data_len = 0;
1483 wmb();
1484 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1485 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1486}
1487
1488static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1489{
1490 struct emac_error_stats *st = &dev->estats;
1491
1492 DBG(dev, "BD RX error %04x" NL, ctrl);
1493
1494 ++st->rx_bd_errors;
1495 if (ctrl & EMAC_RX_ST_OE)
1496 ++st->rx_bd_overrun;
1497 if (ctrl & EMAC_RX_ST_BP)
1498 ++st->rx_bd_bad_packet;
1499 if (ctrl & EMAC_RX_ST_RP)
1500 ++st->rx_bd_runt_packet;
1501 if (ctrl & EMAC_RX_ST_SE)
1502 ++st->rx_bd_short_event;
1503 if (ctrl & EMAC_RX_ST_AE)
1504 ++st->rx_bd_alignment_error;
1505 if (ctrl & EMAC_RX_ST_BFCS)
1506 ++st->rx_bd_bad_fcs;
1507 if (ctrl & EMAC_RX_ST_PTL)
1508 ++st->rx_bd_packet_too_long;
1509 if (ctrl & EMAC_RX_ST_ORE)
1510 ++st->rx_bd_out_of_range;
1511 if (ctrl & EMAC_RX_ST_IRE)
1512 ++st->rx_bd_in_range;
1513}
1514
1515static inline void emac_rx_csum(struct emac_instance *dev,
1516 struct sk_buff *skb, u16 ctrl)
1517{
1518#ifdef CONFIG_IBM_NEW_EMAC_TAH
1519 if (!ctrl && dev->tah_dev) {
1520 skb->ip_summed = CHECKSUM_UNNECESSARY;
1521 ++dev->stats.rx_packets_csum;
1522 }
1523#endif
1524}
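/* Note: "ctrl" above has already been masked with EMAC_BAD_RX_MASK by the
 * caller, so ctrl == 0 means the TAH reported no checksum error and the
 * skb may be flagged CHECKSUM_UNNECESSARY.
 */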
1525
1526static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1527{
1528 if (likely(dev->rx_sg_skb != NULL)) {
1529 int len = dev->rx_desc[slot].data_len;
1530 int tot_len = dev->rx_sg_skb->len + len;
1531
1532 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1533 ++dev->estats.rx_dropped_mtu;
1534 dev_kfree_skb(dev->rx_sg_skb);
1535 dev->rx_sg_skb = NULL;
1536 } else {
1537 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1538 dev->rx_skb[slot]->data, len);
1539 skb_put(dev->rx_sg_skb, len);
1540 emac_recycle_rx_skb(dev, slot, len);
1541 return 0;
1542 }
1543 }
1544 emac_recycle_rx_skb(dev, slot, 0);
1545 return -1;
1546}
1547
1548/* NAPI poll context */
1549static int emac_poll_rx(void *param, int budget)
1550{
1551 struct emac_instance *dev = param;
1552 int slot = dev->rx_slot, received = 0;
1553
1554 DBG2(dev, "poll_rx(%d)" NL, budget);
1555
1556 again:
1557 while (budget > 0) {
1558 int len;
1559 struct sk_buff *skb;
1560 u16 ctrl = dev->rx_desc[slot].ctrl;
1561
1562 if (ctrl & MAL_RX_CTRL_EMPTY)
1563 break;
1564
1565 skb = dev->rx_skb[slot];
1566 mb();
1567 len = dev->rx_desc[slot].data_len;
1568
1569 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1570 goto sg;
1571
1572 ctrl &= EMAC_BAD_RX_MASK;
1573 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1574 emac_parse_rx_error(dev, ctrl);
1575 ++dev->estats.rx_dropped_error;
1576 emac_recycle_rx_skb(dev, slot, 0);
1577 len = 0;
1578 goto next;
1579 }
1580
1581 if (len && len < EMAC_RX_COPY_THRESH) {
1582 struct sk_buff *copy_skb =
1583 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1584 if (unlikely(!copy_skb))
1585 goto oom;
1586
1587 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1588 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1589 len + 2);
1590 emac_recycle_rx_skb(dev, slot, len);
1591 skb = copy_skb;
1592 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1593 goto oom;
1594
1595 skb_put(skb, len);
1596 push_packet:
1597 skb->dev = dev->ndev;
1598 skb->protocol = eth_type_trans(skb, dev->ndev);
1599 emac_rx_csum(dev, skb, ctrl);
1600
1601 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1602 ++dev->estats.rx_dropped_stack;
1603 next:
1604 ++dev->stats.rx_packets;
1605 skip:
1606 dev->stats.rx_bytes += len;
1607 slot = (slot + 1) % NUM_RX_BUFF;
1608 --budget;
1609 ++received;
1610 continue;
1611 sg:
1612 if (ctrl & MAL_RX_CTRL_FIRST) {
1613 BUG_ON(dev->rx_sg_skb);
1614 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1615 DBG(dev, "rx OOM %d" NL, slot);
1616 ++dev->estats.rx_dropped_oom;
1617 emac_recycle_rx_skb(dev, slot, 0);
1618 } else {
1619 dev->rx_sg_skb = skb;
1620 skb_put(skb, len);
1621 }
1622 } else if (!emac_rx_sg_append(dev, slot) &&
1623 (ctrl & MAL_RX_CTRL_LAST)) {
1624
1625 skb = dev->rx_sg_skb;
1626 dev->rx_sg_skb = NULL;
1627
1628 ctrl &= EMAC_BAD_RX_MASK;
1629 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1630 emac_parse_rx_error(dev, ctrl);
1631 ++dev->estats.rx_dropped_error;
1632 dev_kfree_skb(skb);
1633 len = 0;
1634 } else
1635 goto push_packet;
1636 }
1637 goto skip;
1638 oom:
1639 DBG(dev, "rx OOM %d" NL, slot);
1640 /* Drop the packet and recycle skb */
1641 ++dev->estats.rx_dropped_oom;
1642 emac_recycle_rx_skb(dev, slot, 0);
1643 goto next;
1644 }
1645
1646 if (received) {
1647 DBG2(dev, "rx %d BDs" NL, received);
1648 dev->rx_slot = slot;
1649 }
1650
1651 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1652 mb();
1653 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1654 DBG2(dev, "rx restart" NL);
1655 received = 0;
1656 goto again;
1657 }
1658
1659 if (dev->rx_sg_skb) {
1660 DBG2(dev, "dropping partial rx packet" NL);
1661 ++dev->estats.rx_dropped_error;
1662 dev_kfree_skb(dev->rx_sg_skb);
1663 dev->rx_sg_skb = NULL;
1664 }
1665
1666 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1667 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1668 emac_rx_enable(dev);
1669 dev->rx_slot = 0;
1670 }
1671 return received;
1672}
1673
1674/* NAPI poll context */
1675static int emac_peek_rx(void *param)
1676{
1677 struct emac_instance *dev = param;
1678
1679 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1680}
1681
1682/* NAPI poll context */
1683static int emac_peek_rx_sg(void *param)
1684{
1685 struct emac_instance *dev = param;
1686
1687 int slot = dev->rx_slot;
1688 while (1) {
1689 u16 ctrl = dev->rx_desc[slot].ctrl;
1690 if (ctrl & MAL_RX_CTRL_EMPTY)
1691 return 0;
1692 else if (ctrl & MAL_RX_CTRL_LAST)
1693 return 1;
1694
1695 slot = (slot + 1) % NUM_RX_BUFF;
1696
1697 /* I'm just being paranoid here :) */
1698 if (unlikely(slot == dev->rx_slot))
1699 return 0;
1700 }
1701}
1702
1703/* Hard IRQ */
1704static void emac_rxde(void *param)
1705{
1706 struct emac_instance *dev = param;
1707
1708 ++dev->estats.rx_stopped;
1709 emac_rx_disable_async(dev);
1710}
1711
1712/* Hard IRQ */
1713static irqreturn_t emac_irq(int irq, void *dev_instance)
1714{
1715 struct emac_instance *dev = dev_instance;
1716 struct emac_regs __iomem *p = dev->emacp;
1717 struct emac_error_stats *st = &dev->estats;
1718 u32 isr;
1719
1720 spin_lock(&dev->lock);
1721
1722 isr = in_be32(&p->isr);
1723 out_be32(&p->isr, isr);
1724
1725 DBG(dev, "isr = %08x" NL, isr);
1726
1727 if (isr & EMAC4_ISR_TXPE)
1728 ++st->tx_parity;
1729 if (isr & EMAC4_ISR_RXPE)
1730 ++st->rx_parity;
1731 if (isr & EMAC4_ISR_TXUE)
1732 ++st->tx_underrun;
1733 if (isr & EMAC4_ISR_RXOE)
1734 ++st->rx_fifo_overrun;
1735 if (isr & EMAC_ISR_OVR)
1736 ++st->rx_overrun;
1737 if (isr & EMAC_ISR_BP)
1738 ++st->rx_bad_packet;
1739 if (isr & EMAC_ISR_RP)
1740 ++st->rx_runt_packet;
1741 if (isr & EMAC_ISR_SE)
1742 ++st->rx_short_event;
1743 if (isr & EMAC_ISR_ALE)
1744 ++st->rx_alignment_error;
1745 if (isr & EMAC_ISR_BFCS)
1746 ++st->rx_bad_fcs;
1747 if (isr & EMAC_ISR_PTLE)
1748 ++st->rx_packet_too_long;
1749 if (isr & EMAC_ISR_ORE)
1750 ++st->rx_out_of_range;
1751 if (isr & EMAC_ISR_IRE)
1752 ++st->rx_in_range;
1753 if (isr & EMAC_ISR_SQE)
1754 ++st->tx_sqe;
1755 if (isr & EMAC_ISR_TE)
1756 ++st->tx_errors;
1757
1758 spin_unlock(&dev->lock);
1759
1760 return IRQ_HANDLED;
1761}
1762
1763static struct net_device_stats *emac_stats(struct net_device *ndev)
1764{
1765 struct emac_instance *dev = netdev_priv(ndev);
1766 struct emac_stats *st = &dev->stats;
1767 struct emac_error_stats *est = &dev->estats;
1768 struct net_device_stats *nst = &dev->nstats;
1769 unsigned long flags;
1770
1771 DBG2(dev, "stats" NL);
1772
1773 /* Compute "legacy" statistics */
1774 spin_lock_irqsave(&dev->lock, flags);
1775 nst->rx_packets = (unsigned long)st->rx_packets;
1776 nst->rx_bytes = (unsigned long)st->rx_bytes;
1777 nst->tx_packets = (unsigned long)st->tx_packets;
1778 nst->tx_bytes = (unsigned long)st->tx_bytes;
1779 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1780 est->rx_dropped_error +
1781 est->rx_dropped_resize +
1782 est->rx_dropped_mtu);
1783 nst->tx_dropped = (unsigned long)est->tx_dropped;
1784
1785 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1786 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1787 est->rx_fifo_overrun +
1788 est->rx_overrun);
1789 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1790 est->rx_alignment_error);
1791 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1792 est->rx_bad_fcs);
1793 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1794 est->rx_bd_short_event +
1795 est->rx_bd_packet_too_long +
1796 est->rx_bd_out_of_range +
1797 est->rx_bd_in_range +
1798 est->rx_runt_packet +
1799 est->rx_short_event +
1800 est->rx_packet_too_long +
1801 est->rx_out_of_range +
1802 est->rx_in_range);
1803
1804 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1805 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1806 est->tx_underrun);
1807 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1808 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1809 est->tx_bd_excessive_collisions +
1810 est->tx_bd_late_collision +
1811 est->tx_bd_multple_collisions);
1812 spin_unlock_irqrestore(&dev->lock, flags);
1813 return nst;
1814}
1815
1816static struct mal_commac_ops emac_commac_ops = {
1817 .poll_tx = &emac_poll_tx,
1818 .poll_rx = &emac_poll_rx,
1819 .peek_rx = &emac_peek_rx,
1820 .rxde = &emac_rxde,
1821};
1822
1823static struct mal_commac_ops emac_commac_sg_ops = {
1824 .poll_tx = &emac_poll_tx,
1825 .poll_rx = &emac_poll_rx,
1826 .peek_rx = &emac_peek_rx_sg,
1827 .rxde = &emac_rxde,
1828};
1829
1830/* Ethtool support */
1831static int emac_ethtool_get_settings(struct net_device *ndev,
1832 struct ethtool_cmd *cmd)
1833{
1834 struct emac_instance *dev = netdev_priv(ndev);
1835
1836 cmd->supported = dev->phy.features;
1837 cmd->port = PORT_MII;
1838 cmd->phy_address = dev->phy.address;
1839 cmd->transceiver =
1840 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1841
1842 mutex_lock(&dev->link_lock);
1843 cmd->advertising = dev->phy.advertising;
1844 cmd->autoneg = dev->phy.autoneg;
1845 cmd->speed = dev->phy.speed;
1846 cmd->duplex = dev->phy.duplex;
1847 mutex_unlock(&dev->link_lock);
1848
1849 return 0;
1850}
1851
1852static int emac_ethtool_set_settings(struct net_device *ndev,
1853 struct ethtool_cmd *cmd)
1854{
1855 struct emac_instance *dev = netdev_priv(ndev);
1856 u32 f = dev->phy.features;
1857
1858 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1859 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1860
1861 /* Basic sanity checks */
1862 if (dev->phy.address < 0)
1863 return -EOPNOTSUPP;
1864 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1865 return -EINVAL;
1866 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1867 return -EINVAL;
1868 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1869 return -EINVAL;
1870
1871 if (cmd->autoneg == AUTONEG_DISABLE) {
1872 switch (cmd->speed) {
1873 case SPEED_10:
1874 if (cmd->duplex == DUPLEX_HALF
1875 && !(f & SUPPORTED_10baseT_Half))
1876 return -EINVAL;
1877 if (cmd->duplex == DUPLEX_FULL
1878 && !(f & SUPPORTED_10baseT_Full))
1879 return -EINVAL;
1880 break;
1881 case SPEED_100:
1882 if (cmd->duplex == DUPLEX_HALF
1883 && !(f & SUPPORTED_100baseT_Half))
1884 return -EINVAL;
1885 if (cmd->duplex == DUPLEX_FULL
1886 && !(f & SUPPORTED_100baseT_Full))
1887 return -EINVAL;
1888 break;
1889 case SPEED_1000:
1890 if (cmd->duplex == DUPLEX_HALF
1891 && !(f & SUPPORTED_1000baseT_Half))
1892 return -EINVAL;
1893 if (cmd->duplex == DUPLEX_FULL
1894 && !(f & SUPPORTED_1000baseT_Full))
1895 return -EINVAL;
1896 break;
1897 default:
1898 return -EINVAL;
1899 }
1900
1901 mutex_lock(&dev->link_lock);
1902 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1903 cmd->duplex);
1904 mutex_unlock(&dev->link_lock);
1905
1906 } else {
1907 if (!(f & SUPPORTED_Autoneg))
1908 return -EINVAL;
1909
1910 mutex_lock(&dev->link_lock);
1911 dev->phy.def->ops->setup_aneg(&dev->phy,
1912 (cmd->advertising & f) |
1913 (dev->phy.advertising &
1914 (ADVERTISED_Pause |
1915 ADVERTISED_Asym_Pause)));
1916 mutex_unlock(&dev->link_lock);
1917 }
1918 emac_force_link_update(dev);
1919
1920 return 0;
1921}
1922
1923static void emac_ethtool_get_ringparam(struct net_device *ndev,
1924 struct ethtool_ringparam *rp)
1925{
1926 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1927 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1928}
1929
1930static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1931 struct ethtool_pauseparam *pp)
1932{
1933 struct emac_instance *dev = netdev_priv(ndev);
1934
1935 mutex_lock(&dev->link_lock);
1936 if ((dev->phy.features & SUPPORTED_Autoneg) &&
1937 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1938 pp->autoneg = 1;
1939
1940 if (dev->phy.duplex == DUPLEX_FULL) {
1941 if (dev->phy.pause)
1942 pp->rx_pause = pp->tx_pause = 1;
1943 else if (dev->phy.asym_pause)
1944 pp->tx_pause = 1;
1945 }
1946 mutex_unlock(&dev->link_lock);
1947}
1948
1949static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1950{
1951 struct emac_instance *dev = netdev_priv(ndev);
1952
1953 return dev->tah_dev != 0;
1954}
1955
1956static int emac_get_regs_len(struct emac_instance *dev)
1957{
1958 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1959 return sizeof(struct emac_ethtool_regs_subhdr) +
1960 EMAC4_ETHTOOL_REGS_SIZE;
1961 else
1962 return sizeof(struct emac_ethtool_regs_subhdr) +
1963 EMAC_ETHTOOL_REGS_SIZE;
1964}
1965
1966static int emac_ethtool_get_regs_len(struct net_device *ndev)
1967{
1968 struct emac_instance *dev = netdev_priv(ndev);
1969 int size;
1970
1971 size = sizeof(struct emac_ethtool_regs_hdr) +
1972 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
1973 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
1974 size += zmii_get_regs_len(dev->zmii_dev);
1975 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
1976 size += rgmii_get_regs_len(dev->rgmii_dev);
1977 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1978 size += tah_get_regs_len(dev->tah_dev);
1979
1980 return size;
1981}
1982
1983static void *emac_dump_regs(struct emac_instance *dev, void *buf)
1984{
1985 struct emac_ethtool_regs_subhdr *hdr = buf;
1986
1987 hdr->index = dev->cell_index;
1988 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
1989 hdr->version = EMAC4_ETHTOOL_REGS_VER;
1990 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
1991 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
1992 } else {
1993 hdr->version = EMAC_ETHTOOL_REGS_VER;
1994 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1995 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1996 }
1997}
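/*
 * Dump layout for one EMAC, as produced by the code above (a sketch,
 * not a separately specified format):
 *
 *   struct emac_ethtool_regs_subhdr      version + cell index
 *   u8 regs[EMAC(4)_ETHTOOL_REGS_SIZE]   raw register image
 *
 * The return value points just past this block so the caller can
 * append the next component's dump.
 */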
1998
1999static void emac_ethtool_get_regs(struct net_device *ndev,
2000 struct ethtool_regs *regs, void *buf)
2001{
2002 struct emac_instance *dev = netdev_priv(ndev);
2003 struct emac_ethtool_regs_hdr *hdr = buf;
2004
2005 hdr->components = 0;
2006 buf = hdr + 1;
2007
2008 buf = mal_dump_regs(dev->mal, buf);
2009 buf = emac_dump_regs(dev, buf);
2010 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2011 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2012 buf = zmii_dump_regs(dev->zmii_dev, buf);
2013 }
2014 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2015 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2016 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2017 }
2018 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2019 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2020 buf = tah_dump_regs(dev->tah_dev, buf);
2021 }
2022}
2023
2024static int emac_ethtool_nway_reset(struct net_device *ndev)
2025{
2026 struct emac_instance *dev = netdev_priv(ndev);
2027 int res = 0;
2028
2029 DBG(dev, "nway_reset" NL);
2030
2031 if (dev->phy.address < 0)
2032 return -EOPNOTSUPP;
2033
2034 mutex_lock(&dev->link_lock);
2035 if (!dev->phy.autoneg) {
2036 res = -EINVAL;
2037 goto out;
2038 }
2039
2040 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2041 out:
2042 mutex_unlock(&dev->link_lock);
2043 emac_force_link_update(dev);
2044 return res;
2045}
2046
2047static int emac_ethtool_get_stats_count(struct net_device *ndev)
2048{
2049 return EMAC_ETHTOOL_STATS_COUNT;
2050}
2051
2052static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
 2053 u8 *buf)
2054{
2055 if (stringset == ETH_SS_STATS)
2056 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2057}
2058
2059static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2060 struct ethtool_stats *estats,
 2061 u64 *tmp_stats)
2062{
2063 struct emac_instance *dev = netdev_priv(ndev);
2064
2065 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2066 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2067 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2068}
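/*
 * Note: this assumes dev->stats and dev->estats are laid out as
 * consecutive u64-sized counters matching emac_stats_keys, so the two
 * raw memcpy() calls yield exactly the array ethtool expects.
 */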
2069
2070static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2071 struct ethtool_drvinfo *info)
2072{
2073 struct emac_instance *dev = netdev_priv(ndev);
2074
2075 strcpy(info->driver, "ibm_emac");
2076 strcpy(info->version, DRV_VERSION);
2077 info->fw_version[0] = '\0';
 2078 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
 2079 dev->cell_index, dev->ofdev->node->full_name);
2080 info->n_stats = emac_ethtool_get_stats_count(ndev);
2081 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2082}
2083
2084static const struct ethtool_ops emac_ethtool_ops = {
2085 .get_settings = emac_ethtool_get_settings,
2086 .set_settings = emac_ethtool_set_settings,
2087 .get_drvinfo = emac_ethtool_get_drvinfo,
2088
2089 .get_regs_len = emac_ethtool_get_regs_len,
2090 .get_regs = emac_ethtool_get_regs,
2091
2092 .nway_reset = emac_ethtool_nway_reset,
2093
2094 .get_ringparam = emac_ethtool_get_ringparam,
2095 .get_pauseparam = emac_ethtool_get_pauseparam,
2096
2097 .get_rx_csum = emac_ethtool_get_rx_csum,
2098
2099 .get_strings = emac_ethtool_get_strings,
2100 .get_stats_count = emac_ethtool_get_stats_count,
2101 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2102
2103 .get_link = ethtool_op_get_link,
2104 .get_tx_csum = ethtool_op_get_tx_csum,
2105 .get_sg = ethtool_op_get_sg,
2106};
2107
2108static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2109{
2110 struct emac_instance *dev = netdev_priv(ndev);
 2111 uint16_t *data = (uint16_t *)&rq->ifr_ifru;
2112
2113 DBG(dev, "ioctl %08x" NL, cmd);
2114
2115 if (dev->phy.address < 0)
2116 return -EOPNOTSUPP;
2117
2118 switch (cmd) {
2119 case SIOCGMIIPHY:
2120 case SIOCDEVPRIVATE:
2121 data[0] = dev->phy.address;
2122 /* Fall through */
2123 case SIOCGMIIREG:
2124 case SIOCDEVPRIVATE + 1:
2125 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2126 return 0;
2127
2128 case SIOCSMIIREG:
2129 case SIOCDEVPRIVATE + 2:
2130 if (!capable(CAP_NET_ADMIN))
2131 return -EPERM;
2132 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2133 return 0;
2134 default:
2135 return -EOPNOTSUPP;
2136 }
2137}
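/*
 * The u16 overlay on ifr_ifru above follows the classic MII ioctl
 * convention, i.e. the layout of struct mii_ioctl_data:
 *
 *   data[0]   phy_id    (returned for SIOCGMIIPHY)
 *   data[1]   reg_num   (register to access)
 *   data[2]   val_in    (value to write, SIOCSMIIREG)
 *   data[3]   val_out   (value read, SIOCGMIIREG)
 */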
2138
2139struct emac_depentry {
2140 u32 phandle;
2141 struct device_node *node;
2142 struct of_device *ofdev;
2143 void *drvdata;
2144};
2145
2146#define EMAC_DEP_MAL_IDX 0
2147#define EMAC_DEP_ZMII_IDX 1
2148#define EMAC_DEP_RGMII_IDX 2
2149#define EMAC_DEP_TAH_IDX 3
2150#define EMAC_DEP_MDIO_IDX 4
2151#define EMAC_DEP_PREV_IDX 5
2152#define EMAC_DEP_COUNT 6
2153
2154static int __devinit emac_check_deps(struct emac_instance *dev,
2155 struct emac_depentry *deps)
2156{
2157 int i, there = 0;
2158 struct device_node *np;
2159
2160 for (i = 0; i < EMAC_DEP_COUNT; i++) {
 2161 /* no dependency on that item, all right */
2162 if (deps[i].phandle == 0) {
2163 there++;
2164 continue;
2165 }
2166 /* special case for blist as the dependency might go away */
2167 if (i == EMAC_DEP_PREV_IDX) {
2168 np = *(dev->blist - 1);
2169 if (np == NULL) {
2170 deps[i].phandle = 0;
2171 there++;
2172 continue;
2173 }
2174 if (deps[i].node == NULL)
2175 deps[i].node = of_node_get(np);
2176 }
2177 if (deps[i].node == NULL)
2178 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2179 if (deps[i].node == NULL)
2180 continue;
2181 if (deps[i].ofdev == NULL)
2182 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2183 if (deps[i].ofdev == NULL)
2184 continue;
2185 if (deps[i].drvdata == NULL)
2186 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2187 if (deps[i].drvdata != NULL)
2188 there++;
2189 }
2190 return (there == EMAC_DEP_COUNT);
2191}
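/*
 * Each dependency is resolved in stages across repeated calls:
 * phandle -> device_node -> of_device -> driver data. A slot only
 * counts as "there" once the companion driver has published its
 * drvdata, which makes this safe to poll from emac_wait_deps() until
 * MAL, ZMII, RGMII, TAH and MDIO have all finished probing.
 */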
2192
2193static void emac_put_deps(struct emac_instance *dev)
2194{
2195 if (dev->mal_dev)
2196 of_dev_put(dev->mal_dev);
2197 if (dev->zmii_dev)
2198 of_dev_put(dev->zmii_dev);
2199 if (dev->rgmii_dev)
2200 of_dev_put(dev->rgmii_dev);
2201 if (dev->mdio_dev)
2202 of_dev_put(dev->mdio_dev);
2203 if (dev->tah_dev)
2204 of_dev_put(dev->tah_dev);
2205}
2206
2207static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2208 unsigned long action, void *data)
2209{
 2210 /* We are only interested in devices being bound to a driver */
2211 if (action == BUS_NOTIFY_BOUND_DRIVER)
2212 wake_up_all(&emac_probe_wait);
2213 return 0;
2214}
2215
2216static struct notifier_block emac_of_bus_notifier = {
2217 .notifier_call = emac_of_bus_notify
2218};
2219
2220static int __devinit emac_wait_deps(struct emac_instance *dev)
2221{
2222 struct emac_depentry deps[EMAC_DEP_COUNT];
2223 int i, err;
2224
2225 memset(&deps, 0, sizeof(deps));
2226
2227 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2228 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2229 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2230 if (dev->tah_ph)
2231 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2232 if (dev->mdio_ph)
2233 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2234 if (dev->blist && dev->blist > emac_boot_list)
2235 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2236 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2237 wait_event_timeout(emac_probe_wait,
2238 emac_check_deps(dev, deps),
2239 EMAC_PROBE_DEP_TIMEOUT);
2240 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2241 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2242 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2243 if (deps[i].node)
2244 of_node_put(deps[i].node);
2245 if (err && deps[i].ofdev)
2246 of_dev_put(deps[i].ofdev);
2247 }
2248 if (err == 0) {
2249 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2250 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2251 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2252 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2253 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2254 }
2255 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2256 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2257 return err;
2258}
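/*
 * The bus notifier merely kicks the wait queue; emac_check_deps()
 * re-evaluates everything on each wakeup, so spurious wakeups are
 * harmless and anything still unresolved after EMAC_PROBE_DEP_TIMEOUT
 * fails the probe with -ENODEV.
 */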
2259
2260static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2261 u32 *val, int fatal)
2262{
2263 int len;
2264 const u32 *prop = of_get_property(np, name, &len);
2265 if (prop == NULL || len < sizeof(u32)) {
2266 if (fatal)
2267 printk(KERN_ERR "%s: missing %s property\n",
2268 np->full_name, name);
2269 return -ENODEV;
2270 }
2271 *val = *prop;
2272 return 0;
2273}
2274
2275static int __devinit emac_init_phy(struct emac_instance *dev)
2276{
2277 struct device_node *np = dev->ofdev->node;
2278 struct net_device *ndev = dev->ndev;
2279 u32 phy_map, adv;
2280 int i;
2281
2282 dev->phy.dev = ndev;
2283 dev->phy.mode = dev->phy_mode;
2284
2285 /* PHY-less configuration.
2286 * XXX I probably should move these settings to the dev tree
2287 */
2288 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2289 emac_reset(dev);
2290
2294 dev->phy.address = -1;
2295 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2296 dev->phy.pause = 1;
2297
2298 return 0;
2299 }
2300
2301 mutex_lock(&emac_phy_map_lock);
2302 phy_map = dev->phy_map | busy_phy_map;
2303
2304 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2305
2306 dev->phy.mdio_read = emac_mdio_read;
2307 dev->phy.mdio_write = emac_mdio_write;
2308
2309 /* Configure EMAC with defaults so we can at least use MDIO
2310 * This is needed mostly for 440GX
2311 */
2312 if (emac_phy_gpcs(dev->phy.mode)) {
2313 /* XXX
2314 * Make GPCS PHY address equal to EMAC index.
2315 * We probably should take into account busy_phy_map
2316 * and/or phy_map here.
2317 *
2318 * Note that the busy_phy_map is currently global
2319 * while it should probably be per-ASIC...
2320 */
2321 dev->phy.address = dev->cell_index;
2322 }
2323
2324 emac_configure(dev);
2325
2326 if (dev->phy_address != 0xffffffff)
2327 phy_map = ~(1 << dev->phy_address);
2328
2329 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2330 if (!(phy_map & 1)) {
2331 int r;
2332 busy_phy_map |= 1 << i;
2333
2334 /* Quick check if there is a PHY at the address */
2335 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2336 if (r == 0xffff || r < 0)
2337 continue;
2338 if (!emac_mii_phy_probe(&dev->phy, i))
2339 break;
2340 }
2341 mutex_unlock(&emac_phy_map_lock);
2342 if (i == 0x20) {
2343 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2344 return -ENXIO;
2345 }
2346
2347 /* Init PHY */
2348 if (dev->phy.def->ops->init)
2349 dev->phy.def->ops->init(&dev->phy);
2350
2351 /* Disable any PHY features not supported by the platform */
2352 dev->phy.def->features &= ~dev->phy_feat_exc;
2353
2354 /* Setup initial link parameters */
2355 if (dev->phy.features & SUPPORTED_Autoneg) {
2356 adv = dev->phy.features;
2357 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2358 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2359 /* Restart autonegotiation */
2360 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2361 } else {
2362 u32 f = dev->phy.def->features;
2363 int speed = SPEED_10, fd = DUPLEX_HALF;
2364
2365 /* Select highest supported speed/duplex */
2366 if (f & SUPPORTED_1000baseT_Full) {
2367 speed = SPEED_1000;
2368 fd = DUPLEX_FULL;
2369 } else if (f & SUPPORTED_1000baseT_Half)
2370 speed = SPEED_1000;
2371 else if (f & SUPPORTED_100baseT_Full) {
2372 speed = SPEED_100;
2373 fd = DUPLEX_FULL;
2374 } else if (f & SUPPORTED_100baseT_Half)
2375 speed = SPEED_100;
2376 else if (f & SUPPORTED_10baseT_Full)
2377 fd = DUPLEX_FULL;
2378
2379 /* Force link parameters */
2380 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2381 }
2382 return 0;
2383}
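/*
 * PHY scan notes: busy_phy_map keeps EMACs that share MDIO lines from
 * claiming the same address. When "phy-address" is given in the
 * device tree only that address is probed; otherwise every address
 * not excluded by phy_map/busy_phy_map is tried, using a BMCR read as
 * a cheap liveness test before the full emac_mii_phy_probe().
 */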
2384
2385static int __devinit emac_init_config(struct emac_instance *dev)
2386{
2387 struct device_node *np = dev->ofdev->node;
2388 const void *p;
2389 unsigned int plen;
2390 const char *pm, *phy_modes[] = {
2391 [PHY_MODE_NA] = "",
2392 [PHY_MODE_MII] = "mii",
2393 [PHY_MODE_RMII] = "rmii",
2394 [PHY_MODE_SMII] = "smii",
2395 [PHY_MODE_RGMII] = "rgmii",
2396 [PHY_MODE_TBI] = "tbi",
2397 [PHY_MODE_GMII] = "gmii",
2398 [PHY_MODE_RTBI] = "rtbi",
2399 [PHY_MODE_SGMII] = "sgmii",
2400 };
2401
2402 /* Read config from device-tree */
2403 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2404 return -ENXIO;
2405 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2406 return -ENXIO;
2407 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2408 return -ENXIO;
2409 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2410 return -ENXIO;
2411 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2412 dev->max_mtu = 1500;
2413 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2414 dev->rx_fifo_size = 2048;
2415 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2416 dev->tx_fifo_size = 2048;
2417 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2418 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2419 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2420 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2421 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2422 dev->phy_address = 0xffffffff;
2423 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2424 dev->phy_map = 0xffffffff;
2425 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2426 return -ENXIO;
2427 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2428 dev->tah_ph = 0;
2429 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
 2430 dev->tah_port = 0;
2431 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2432 dev->mdio_ph = 0;
2433 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
 2434 dev->zmii_ph = 0;
 2435 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
 2436 dev->zmii_port = 0xffffffff;
 2437 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
 2438 dev->rgmii_ph = 0;
 2439 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
 2440 dev->rgmii_port = 0xffffffff;
2441 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2442 dev->fifo_entry_size = 16;
2443 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2444 dev->mal_burst_size = 256;
2445
2446 /* PHY mode needs some decoding */
2447 dev->phy_mode = PHY_MODE_NA;
2448 pm = of_get_property(np, "phy-mode", &plen);
2449 if (pm != NULL) {
2450 int i;
2451 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2452 if (!strcasecmp(pm, phy_modes[i])) {
2453 dev->phy_mode = i;
2454 break;
2455 }
2456 }
2457
2458 /* Backward compat with non-final DT */
2459 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2460 u32 nmode = *(const u32 *)pm;
2461 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2462 dev->phy_mode = nmode;
2463 }
2464
2465 /* Check EMAC version */
2466 if (of_device_is_compatible(np, "ibm,emac4"))
2467 dev->features |= EMAC_FTR_EMAC4;
2468 if (of_device_is_compatible(np, "ibm,emac-axon")
2469 || of_device_is_compatible(np, "ibm,emac-440epx"))
2470 dev->features |= EMAC_FTR_HAS_AXON_STACR
2471 | EMAC_FTR_STACR_OC_INVERT;
2472 if (of_device_is_compatible(np, "ibm,emac-440spe"))
2473 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2474
2475 /* Fixup some feature bits based on the device tree and verify
2476 * we have support for them compiled in
2477 */
2478 if (dev->tah_ph != 0) {
2479#ifdef CONFIG_IBM_NEW_EMAC_TAH
2480 dev->features |= EMAC_FTR_HAS_TAH;
2481#else
2482 printk(KERN_ERR "%s: TAH support not enabled !\n",
2483 np->full_name);
2484 return -ENXIO;
2485#endif
2486 }
2487
2488 if (dev->zmii_ph != 0) {
2489#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2490 dev->features |= EMAC_FTR_HAS_ZMII;
2491#else
2492 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2493 np->full_name);
2494 return -ENXIO;
2495#endif
2496 }
2497
2498 if (dev->rgmii_ph != 0) {
2499#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2500 dev->features |= EMAC_FTR_HAS_RGMII;
2501#else
2502 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2503 np->full_name);
2504 return -ENXIO;
2505#endif
2506 }
2507
2508 /* Read MAC-address */
2509 p = of_get_property(np, "local-mac-address", NULL);
2510 if (p == NULL) {
2511 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2512 np->full_name);
2513 return -ENXIO;
2514 }
2515 memcpy(dev->ndev->dev_addr, p, 6);
2516
2517 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2518 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2519 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2520 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2521 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2522
2523 return 0;
2524}
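/*
 * For reference, a hypothetical device-tree node providing the
 * mandatory properties read above (labels, addresses and the MAC
 * address are illustrative only; "clock-frequency" is taken from the
 * parent bus node):
 *
 *   EMAC0: ethernet@40000800 {
 *           device_type = "network";
 *           compatible = "ibm,emac4", "ibm,emac";
 *           cell-index = <0>;
 *           reg = <0x40000800 0x74>;
 *           local-mac-address = [00 04 ac 7c 00 01];
 *           mal-device = <&MAL0>;
 *           mal-tx-channel = <0>;
 *           mal-rx-channel = <0>;
 *           max-frame-size = <1500>;
 *           rx-fifo-size = <2048>;
 *           tx-fifo-size = <2048>;
 *           phy-mode = "rgmii";
 *           phy-map = <0xffffffff>;
 *   };
 */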
2525
2526static int __devinit emac_probe(struct of_device *ofdev,
2527 const struct of_device_id *match)
2528{
2529 struct net_device *ndev;
2530 struct emac_instance *dev;
2531 struct device_node *np = ofdev->node;
2532 struct device_node **blist = NULL;
2533 int err, i;
2534
2535 /* Find ourselves in the bootlist if we are there */
2536 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2537 if (emac_boot_list[i] == np)
2538 blist = &emac_boot_list[i];
2539
2540 /* Allocate our net_device structure */
2541 err = -ENOMEM;
2542 ndev = alloc_etherdev(sizeof(struct emac_instance));
2543 if (!ndev) {
2544 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2545 np->full_name);
2546 goto err_gone;
2547 }
2548 dev = netdev_priv(ndev);
2549 dev->ndev = ndev;
2550 dev->ofdev = ofdev;
2551 dev->blist = blist;
2552 SET_NETDEV_DEV(ndev, &ofdev->dev);
2553
2554 /* Initialize some embedded data structures */
2555 mutex_init(&dev->mdio_lock);
2556 mutex_init(&dev->link_lock);
2557 spin_lock_init(&dev->lock);
2558 INIT_WORK(&dev->reset_work, emac_reset_work);
2559
2560 /* Init various config data based on device-tree */
2561 err = emac_init_config(dev);
2562 if (err != 0)
2563 goto err_free;
2564
2565 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2566 dev->emac_irq = irq_of_parse_and_map(np, 0);
2567 dev->wol_irq = irq_of_parse_and_map(np, 1);
 2568 if (dev->emac_irq == NO_IRQ) {
 2569 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
 err = -ENODEV;
 2570 goto err_free;
 2571 }
2572 ndev->irq = dev->emac_irq;
2573
2574 /* Map EMAC regs */
 2575 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
 2576 printk(KERN_ERR "%s: Can't get registers address\n",
 2577 np->full_name);
 err = -ENXIO;
 2578 goto err_irq_unmap;
 2579 }
 2580 /* TODO: request_mem_region */
2581 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2582 if (dev->emacp == NULL) {
2583 printk(KERN_ERR "%s: Can't map device registers!\n",
2584 np->full_name);
2585 err = -ENOMEM;
2586 goto err_irq_unmap;
2587 }
2588
2589 /* Wait for dependent devices */
2590 err = emac_wait_deps(dev);
2591 if (err) {
2592 printk(KERN_ERR
2593 "%s: Timeout waiting for dependent devices\n",
2594 np->full_name);
2595 /* display more info about what's missing ? */
2596 goto err_reg_unmap;
2597 }
2598 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2599 if (dev->mdio_dev != NULL)
2600 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2601
2602 /* Register with MAL */
2603 dev->commac.ops = &emac_commac_ops;
2604 dev->commac.dev = dev;
2605 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2606 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2607 err = mal_register_commac(dev->mal, &dev->commac);
2608 if (err) {
2609 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2610 np->full_name, dev->mal_dev->node->full_name);
2611 goto err_rel_deps;
2612 }
2613 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2614 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2615
2616 /* Get pointers to BD rings */
2617 dev->tx_desc =
2618 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2619 dev->rx_desc =
2620 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2621
2622 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2623 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2624
2625 /* Clean rings */
2626 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2627 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2628
2629 /* Attach to ZMII, if needed */
2630 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2631 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2632 goto err_unreg_commac;
2633
2634 /* Attach to RGMII, if needed */
2635 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2636 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2637 goto err_detach_zmii;
2638
2639 /* Attach to TAH, if needed */
2640 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2641 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2642 goto err_detach_rgmii;
2643
2644 /* Set some link defaults before we can find out real parameters */
2645 dev->phy.speed = SPEED_100;
2646 dev->phy.duplex = DUPLEX_FULL;
2647 dev->phy.autoneg = AUTONEG_DISABLE;
2648 dev->phy.pause = dev->phy.asym_pause = 0;
2649 dev->stop_timeout = STOP_TIMEOUT_100;
2650 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2651
2652 /* Find PHY if any */
2653 err = emac_init_phy(dev);
2654 if (err != 0)
2655 goto err_detach_tah;
2656
2657 /* Fill in the driver function table */
2658 ndev->open = &emac_open;
2659#ifdef CONFIG_IBM_NEW_EMAC_TAH
2660 if (dev->tah_dev) {
2661 ndev->hard_start_xmit = &emac_start_xmit_sg;
2662 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2663 } else
2664#endif
2665 ndev->hard_start_xmit = &emac_start_xmit;
2666 ndev->tx_timeout = &emac_tx_timeout;
2667 ndev->watchdog_timeo = 5 * HZ;
2668 ndev->stop = &emac_close;
2669 ndev->get_stats = &emac_stats;
2670 ndev->set_multicast_list = &emac_set_multicast_list;
2671 ndev->do_ioctl = &emac_ioctl;
2672 if (emac_phy_supports_gige(dev->phy_mode)) {
2673 ndev->change_mtu = &emac_change_mtu;
2674 dev->commac.ops = &emac_commac_sg_ops;
2675 }
2676 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2677
2678 netif_carrier_off(ndev);
2679 netif_stop_queue(ndev);
2680
2681 err = register_netdev(ndev);
2682 if (err) {
2683 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2684 np->full_name, err);
2685 goto err_detach_tah;
2686 }
2687
2688 /* Set our drvdata last as we don't want them visible until we are
2689 * fully initialized
2690 */
2691 wmb();
2692 dev_set_drvdata(&ofdev->dev, dev);
2693
2694 /* There's a new kid in town ! Let's tell everybody */
2695 wake_up_all(&emac_probe_wait);
 2696
2698 printk(KERN_INFO
2699 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2700 ndev->name, dev->cell_index, np->full_name,
2701 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2702 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2703
2704 if (dev->phy.address >= 0)
 2705 printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
2706 dev->phy.def->name, dev->phy.address);
2707
2708 emac_dbg_register(dev);
2709
2710 /* Life is good */
2711 return 0;
2712
2713 /* I have a bad feeling about this ... */
2714
2715 err_detach_tah:
2716 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2717 tah_detach(dev->tah_dev, dev->tah_port);
2718 err_detach_rgmii:
2719 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2720 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2721 err_detach_zmii:
2722 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2723 zmii_detach(dev->zmii_dev, dev->zmii_port);
2724 err_unreg_commac:
2725 mal_unregister_commac(dev->mal, &dev->commac);
2726 err_rel_deps:
2727 emac_put_deps(dev);
2728 err_reg_unmap:
2729 iounmap(dev->emacp);
2730 err_irq_unmap:
2731 if (dev->wol_irq != NO_IRQ)
2732 irq_dispose_mapping(dev->wol_irq);
2733 if (dev->emac_irq != NO_IRQ)
2734 irq_dispose_mapping(dev->emac_irq);
2735 err_free:
 2736 free_netdev(ndev);
2737 err_gone:
 2738 /* if we were on the bootlist, remove ourselves as we won't
 2739 * show up, and wake up all waiters to notify them in case
 2740 * they were waiting on us
 2741 */
2742 if (blist) {
2743 *blist = NULL;
2744 wake_up_all(&emac_probe_wait);
2745 }
2746 return err;
2747}
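/*
 * The error labels above unwind in exact reverse order of the probe
 * steps; any new initialization stage should gain a matching label so
 * the teardown stays symmetric (compare emac_remove() below).
 */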
2748
2749static int __devexit emac_remove(struct of_device *ofdev)
2750{
2751 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2752
2753 DBG(dev, "remove" NL);
2754
2755 dev_set_drvdata(&ofdev->dev, NULL);
2756
2757 unregister_netdev(dev->ndev);
2758
2759 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2760 tah_detach(dev->tah_dev, dev->tah_port);
2761 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2762 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2763 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2764 zmii_detach(dev->zmii_dev, dev->zmii_port);
2765
2766 mal_unregister_commac(dev->mal, &dev->commac);
2767 emac_put_deps(dev);
2768
2769 emac_dbg_unregister(dev);
2770 iounmap(dev->emacp);
2771
2772 if (dev->wol_irq != NO_IRQ)
2773 irq_dispose_mapping(dev->wol_irq);
2774 if (dev->emac_irq != NO_IRQ)
2775 irq_dispose_mapping(dev->emac_irq);
2776
 2777 free_netdev(dev->ndev);
2778
2779 return 0;
2780}
2781
2782/* XXX Features in here should be replaced by properties... */
2783static struct of_device_id emac_match[] =
2784{
2785 {
2786 .type = "network",
2787 .compatible = "ibm,emac",
2788 },
2789 {
2790 .type = "network",
2791 .compatible = "ibm,emac4",
2792 },
2793 {},
2794};
2795
2796static struct of_platform_driver emac_driver = {
2797 .name = "emac",
2798 .match_table = emac_match,
2799
2800 .probe = emac_probe,
2801 .remove = emac_remove,
2802};
2803
2804static void __init emac_make_bootlist(void)
2805{
2806 struct device_node *np = NULL;
2807 int j, max, i = 0, k;
2808 int cell_indices[EMAC_BOOT_LIST_SIZE];
2809
2810 /* Collect EMACs */
2811 while((np = of_find_all_nodes(np)) != NULL) {
2812 const u32 *idx;
2813
2814 if (of_match_node(emac_match, np) == NULL)
2815 continue;
2816 if (of_get_property(np, "unused", NULL))
2817 continue;
2818 idx = of_get_property(np, "cell-index", NULL);
2819 if (idx == NULL)
2820 continue;
2821 cell_indices[i] = *idx;
2822 emac_boot_list[i++] = of_node_get(np);
2823 if (i >= EMAC_BOOT_LIST_SIZE) {
2824 of_node_put(np);
2825 break;
2826 }
2827 }
2828 max = i;
2829
 2830 /* Sort them by cell index (a simple exchange sort, the list is tiny) */
2831 for (i = 0; max > 1 && (i < (max - 1)); i++)
2832 for (j = i; j < max; j++) {
2833 if (cell_indices[i] > cell_indices[j]) {
2834 np = emac_boot_list[i];
2835 emac_boot_list[i] = emac_boot_list[j];
2836 emac_boot_list[j] = np;
2837 k = cell_indices[i];
2838 cell_indices[i] = cell_indices[j];
2839 cell_indices[j] = k;
2840 }
2841 }
2842}
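/*
 * Probing order matters: the bootlist sorted above is what the
 * EMAC_DEP_PREV_IDX dependency walks, so EMACs come up in cell-index
 * order no matter how the device tree was enumerated.
 */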
2843
2844static int __init emac_init(void)
2845{
2846 int rc;
2847
2848 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2849
2850 /* Init debug stuff */
2851 emac_init_debug();
2852
2853 /* Build EMAC boot list */
2854 emac_make_bootlist();
2855
2856 /* Init submodules */
2857 rc = mal_init();
2858 if (rc)
2859 goto err;
2860 rc = zmii_init();
2861 if (rc)
2862 goto err_mal;
2863 rc = rgmii_init();
2864 if (rc)
2865 goto err_zmii;
2866 rc = tah_init();
2867 if (rc)
2868 goto err_rgmii;
2869 rc = of_register_platform_driver(&emac_driver);
2870 if (rc)
2871 goto err_tah;
2872
2873 return 0;
2874
2875 err_tah:
2876 tah_exit();
2877 err_rgmii:
2878 rgmii_exit();
2879 err_zmii:
2880 zmii_exit();
2881 err_mal:
2882 mal_exit();
2883 err:
2884 return rc;
2885}
2886
2887static void __exit emac_exit(void)
2888{
2889 int i;
2890
2891 of_unregister_platform_driver(&emac_driver);
2892
2893 tah_exit();
2894 rgmii_exit();
2895 zmii_exit();
2896 mal_exit();
2897 emac_fini_debug();
2898
2899 /* Destroy EMAC boot list */
2900 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2901 if (emac_boot_list[i])
2902 of_node_put(emac_boot_list[i]);
2903}
2904
2905module_init(emac_init);
2906module_exit(emac_exit);