]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/gianfar.c
net: Remove unused netdev arg from some NAPI interfaces.
[net-next-2.6.git] / drivers / net / gianfar.c
CommitLineData
0bbaf069 1/*
1da177e4
LT
2 * drivers/net/gianfar.c
3 *
4 * Gianfar Ethernet Driver
7f7f5316
AF
5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
1da177e4
LT
7 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
4c8d3d99 10 * Maintainer: Kumar Gala
1da177e4 11 *
e8a2b6a4 12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
538cc7ee 13 * Copyright (c) 2007 MontaVista Software, Inc.
1da177e4
LT
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 * Gianfar: AKA Lambda Draconis, "Dragon"
21 * RA 11 31 24.2
22 * Dec +69 19 52
23 * V 3.84
24 * B-V +1.62
25 *
26 * Theory of operation
0bbaf069 27 *
b31a1d8b
AF
28 * The driver is initialized through of_device. Configuration information
29 * is therefore conveyed through an OF-style device tree.
1da177e4
LT
30 *
31 * The Gianfar Ethernet Controller uses a ring of buffer
32 * descriptors. The beginning is indicated by a register
0bbaf069
KG
33 * pointing to the physical address of the start of the ring.
34 * The end is determined by a "wrap" bit being set in the
1da177e4
LT
35 * last descriptor of the ring.
36 *
37 * When a packet is received, the RXF bit in the
0bbaf069 38 * IEVENT register is set, triggering an interrupt when the
1da177e4
LT
39 * corresponding bit in the IMASK register is also set (if
40 * interrupt coalescing is active, then the interrupt may not
41 * happen immediately, but will wait until either a set number
bb40dcbb 42 * of frames or amount of time have passed). In NAPI, the
1da177e4 43 * interrupt handler will signal there is work to be done, and
0aa1538f 44 * exit. This method will start at the last known empty
0bbaf069 45 * descriptor, and process every subsequent descriptor until there
1da177e4
LT
46 * are none left with data (NAPI will stop after a set number of
47 * packets to give time to other tasks, but will eventually
48 * process all the packets). The data arrives inside a
49 * pre-allocated skb, and so after the skb is passed up to the
50 * stack, a new skb must be allocated, and the address field in
51 * the buffer descriptor must be updated to indicate this new
52 * skb.
53 *
54 * When the kernel requests that a packet be transmitted, the
55 * driver starts where it left off last time, and points the
56 * descriptor at the buffer which was passed in. The driver
57 * then informs the DMA engine that there are packets ready to
58 * be transmitted. Once the controller is finished transmitting
59 * the packet, an interrupt may be triggered (under the same
60 * conditions as for reception, but depending on the TXF bit).
61 * The driver then cleans up the buffer.
62 */
63
1da177e4 64#include <linux/kernel.h>
1da177e4
LT
65#include <linux/string.h>
66#include <linux/errno.h>
bb40dcbb 67#include <linux/unistd.h>
1da177e4
LT
68#include <linux/slab.h>
69#include <linux/interrupt.h>
70#include <linux/init.h>
71#include <linux/delay.h>
72#include <linux/netdevice.h>
73#include <linux/etherdevice.h>
74#include <linux/skbuff.h>
0bbaf069 75#include <linux/if_vlan.h>
1da177e4
LT
76#include <linux/spinlock.h>
77#include <linux/mm.h>
b31a1d8b 78#include <linux/of_platform.h>
0bbaf069
KG
79#include <linux/ip.h>
80#include <linux/tcp.h>
81#include <linux/udp.h>
9c07b884 82#include <linux/in.h>
1da177e4
LT
83
84#include <asm/io.h>
85#include <asm/irq.h>
86#include <asm/uaccess.h>
87#include <linux/module.h>
1da177e4
LT
88#include <linux/dma-mapping.h>
89#include <linux/crc32.h>
bb40dcbb
AF
90#include <linux/mii.h>
91#include <linux/phy.h>
b31a1d8b
AF
92#include <linux/phy_fixed.h>
93#include <linux/of.h>
1da177e4
LT
94
95#include "gianfar.h"
bb40dcbb 96#include "gianfar_mii.h"
1da177e4
LT
97
98#define TX_TIMEOUT (1*HZ)
1da177e4
LT
99#undef BRIEF_GFAR_ERRORS
100#undef VERBOSE_GFAR_ERRORS
101
1da177e4 102const char gfar_driver_name[] = "Gianfar Ethernet";
7f7f5316 103const char gfar_driver_version[] = "1.3";
1da177e4 104
1da177e4
LT
105static int gfar_enet_open(struct net_device *dev);
106static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
ab939905 107static void gfar_reset_task(struct work_struct *work);
1da177e4
LT
108static void gfar_timeout(struct net_device *dev);
109static int gfar_close(struct net_device *dev);
815b97c6
AF
110struct sk_buff *gfar_new_skb(struct net_device *dev);
111static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
112 struct sk_buff *skb);
1da177e4
LT
113static int gfar_set_mac_address(struct net_device *dev);
114static int gfar_change_mtu(struct net_device *dev, int new_mtu);
7d12e780
DH
115static irqreturn_t gfar_error(int irq, void *dev_id);
116static irqreturn_t gfar_transmit(int irq, void *dev_id);
117static irqreturn_t gfar_interrupt(int irq, void *dev_id);
1da177e4
LT
118static void adjust_link(struct net_device *dev);
119static void init_registers(struct net_device *dev);
120static int init_phy(struct net_device *dev);
b31a1d8b
AF
121static int gfar_probe(struct of_device *ofdev,
122 const struct of_device_id *match);
123static int gfar_remove(struct of_device *ofdev);
bb40dcbb 124static void free_skb_resources(struct gfar_private *priv);
1da177e4
LT
125static void gfar_set_multi(struct net_device *dev);
126static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
d3c12873 127static void gfar_configure_serdes(struct net_device *dev);
bea3348e 128static int gfar_poll(struct napi_struct *napi, int budget);
f2d71c2d
VW
129#ifdef CONFIG_NET_POLL_CONTROLLER
130static void gfar_netpoll(struct net_device *dev);
131#endif
0bbaf069 132int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
f162b9d5 133static int gfar_clean_tx_ring(struct net_device *dev);
2c2db48a
DH
134static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
135 int amount_pull);
0bbaf069
KG
136static void gfar_vlan_rx_register(struct net_device *netdev,
137 struct vlan_group *grp);
7f7f5316 138void gfar_halt(struct net_device *dev);
d87eb127 139static void gfar_halt_nodisable(struct net_device *dev);
7f7f5316
AF
140void gfar_start(struct net_device *dev);
141static void gfar_clear_exact_match(struct net_device *dev);
142static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
1da177e4 143
7282d491 144extern const struct ethtool_ops gfar_ethtool_ops;
1da177e4
LT
145
146MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL");
149
7f7f5316
AF
150/* Returns 1 if incoming frames use an FCB */
151static inline int gfar_uses_fcb(struct gfar_private *priv)
0bbaf069 152{
77ecaf2d 153 return priv->vlgrp || priv->rx_csum_enable;
0bbaf069 154}
bb40dcbb 155
b31a1d8b
AF
156static int gfar_of_init(struct net_device *dev)
157{
158 struct device_node *phy, *mdio;
159 const unsigned int *id;
160 const char *model;
161 const char *ctype;
162 const void *mac_addr;
163 const phandle *ph;
164 u64 addr, size;
165 int err = 0;
166 struct gfar_private *priv = netdev_priv(dev);
167 struct device_node *np = priv->node;
168 char bus_name[MII_BUS_ID_SIZE];
169
170 if (!np || !of_device_is_available(np))
171 return -ENODEV;
172
173 /* get a pointer to the register memory */
174 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
175 priv->regs = ioremap(addr, size);
176
177 if (priv->regs == NULL)
178 return -ENOMEM;
179
180 priv->interruptTransmit = irq_of_parse_and_map(np, 0);
181
182 model = of_get_property(np, "model", NULL);
183
184 /* If we aren't the FEC we have multiple interrupts */
185 if (model && strcasecmp(model, "FEC")) {
186 priv->interruptReceive = irq_of_parse_and_map(np, 1);
187
188 priv->interruptError = irq_of_parse_and_map(np, 2);
189
190 if (priv->interruptTransmit < 0 ||
191 priv->interruptReceive < 0 ||
192 priv->interruptError < 0) {
193 err = -EINVAL;
194 goto err_out;
195 }
196 }
197
198 mac_addr = of_get_mac_address(np);
199 if (mac_addr)
200 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
201
202 if (model && !strcasecmp(model, "TSEC"))
203 priv->device_flags =
204 FSL_GIANFAR_DEV_HAS_GIGABIT |
205 FSL_GIANFAR_DEV_HAS_COALESCE |
206 FSL_GIANFAR_DEV_HAS_RMON |
207 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
208 if (model && !strcasecmp(model, "eTSEC"))
209 priv->device_flags =
210 FSL_GIANFAR_DEV_HAS_GIGABIT |
211 FSL_GIANFAR_DEV_HAS_COALESCE |
212 FSL_GIANFAR_DEV_HAS_RMON |
213 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
2c2db48a 214 FSL_GIANFAR_DEV_HAS_PADDING |
b31a1d8b
AF
215 FSL_GIANFAR_DEV_HAS_CSUM |
216 FSL_GIANFAR_DEV_HAS_VLAN |
217 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
218 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
219
220 ctype = of_get_property(np, "phy-connection-type", NULL);
221
222 /* We only care about rgmii-id. The rest are autodetected */
223 if (ctype && !strcmp(ctype, "rgmii-id"))
224 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
225 else
226 priv->interface = PHY_INTERFACE_MODE_MII;
227
228 if (of_get_property(np, "fsl,magic-packet", NULL))
229 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
230
231 ph = of_get_property(np, "phy-handle", NULL);
232 if (ph == NULL) {
233 u32 *fixed_link;
234
235 fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
236 if (!fixed_link) {
237 err = -ENODEV;
238 goto err_out;
239 }
240
241 snprintf(priv->phy_bus_id, BUS_ID_SIZE, PHY_ID_FMT, "0",
242 fixed_link[0]);
243 } else {
244 phy = of_find_node_by_phandle(*ph);
245
246 if (phy == NULL) {
247 err = -ENODEV;
248 goto err_out;
249 }
250
251 mdio = of_get_parent(phy);
252
253 id = of_get_property(phy, "reg", NULL);
254
255 of_node_put(phy);
256 of_node_put(mdio);
257
258 gfar_mdio_bus_name(bus_name, mdio);
259 snprintf(priv->phy_bus_id, BUS_ID_SIZE, "%s:%02x",
260 bus_name, *id);
261 }
262
263 /* Find the TBI PHY. If it's not there, we don't support SGMII */
264 ph = of_get_property(np, "tbi-handle", NULL);
265 if (ph) {
266 struct device_node *tbi = of_find_node_by_phandle(*ph);
267 struct of_device *ofdev;
268 struct mii_bus *bus;
269
270 if (!tbi)
271 return 0;
272
273 mdio = of_get_parent(tbi);
274 if (!mdio)
275 return 0;
276
277 ofdev = of_find_device_by_node(mdio);
278
279 of_node_put(mdio);
280
281 id = of_get_property(tbi, "reg", NULL);
282 if (!id)
283 return 0;
284
285 of_node_put(tbi);
286
287 bus = dev_get_drvdata(&ofdev->dev);
288
289 priv->tbiphy = bus->phy_map[*id];
290 }
291
292 return 0;
293
294err_out:
295 iounmap(priv->regs);
296 return err;
297}
298
bb40dcbb
AF
299/* Set up the ethernet device structure, private data,
300 * and anything else we need before we start */
b31a1d8b
AF
301static int gfar_probe(struct of_device *ofdev,
302 const struct of_device_id *match)
1da177e4
LT
303{
304 u32 tempval;
305 struct net_device *dev = NULL;
306 struct gfar_private *priv = NULL;
b31a1d8b 307 DECLARE_MAC_BUF(mac);
c50a5d9a
DH
308 int err = 0;
309 int len_devname;
1da177e4
LT
310
311 /* Create an ethernet device instance */
312 dev = alloc_etherdev(sizeof (*priv));
313
bb40dcbb 314 if (NULL == dev)
1da177e4
LT
315 return -ENOMEM;
316
317 priv = netdev_priv(dev);
bea3348e 318 priv->dev = dev;
b31a1d8b 319 priv->node = ofdev->node;
1da177e4 320
b31a1d8b 321 err = gfar_of_init(dev);
1da177e4 322
b31a1d8b 323 if (err)
1da177e4 324 goto regs_fail;
1da177e4 325
fef6108d
AF
326 spin_lock_init(&priv->txlock);
327 spin_lock_init(&priv->rxlock);
d87eb127 328 spin_lock_init(&priv->bflock);
ab939905 329 INIT_WORK(&priv->reset_task, gfar_reset_task);
1da177e4 330
b31a1d8b 331 dev_set_drvdata(&ofdev->dev, priv);
1da177e4
LT
332
333 /* Stop the DMA engine now, in case it was running before */
334 /* (The firmware could have used it, and left it running). */
257d938a 335 gfar_halt(dev);
1da177e4
LT
336
337 /* Reset MAC layer */
338 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
339
340 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
341 gfar_write(&priv->regs->maccfg1, tempval);
342
343 /* Initialize MACCFG2. */
344 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
345
346 /* Initialize ECNTRL */
347 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
348
1da177e4
LT
349 /* Set the dev->base_addr to the gfar reg region */
350 dev->base_addr = (unsigned long) (priv->regs);
351
b31a1d8b 352 SET_NETDEV_DEV(dev, &ofdev->dev);
1da177e4
LT
353
354 /* Fill in the dev structure */
355 dev->open = gfar_enet_open;
356 dev->hard_start_xmit = gfar_start_xmit;
357 dev->tx_timeout = gfar_timeout;
358 dev->watchdog_timeo = TX_TIMEOUT;
bea3348e 359 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
f2d71c2d
VW
360#ifdef CONFIG_NET_POLL_CONTROLLER
361 dev->poll_controller = gfar_netpoll;
1da177e4
LT
362#endif
363 dev->stop = gfar_close;
1da177e4
LT
364 dev->change_mtu = gfar_change_mtu;
365 dev->mtu = 1500;
366 dev->set_multicast_list = gfar_set_multi;
367
0bbaf069
KG
368 dev->ethtool_ops = &gfar_ethtool_ops;
369
b31a1d8b 370 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
0bbaf069 371 priv->rx_csum_enable = 1;
4669bc90 372 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
0bbaf069
KG
373 } else
374 priv->rx_csum_enable = 0;
375
376 priv->vlgrp = NULL;
1da177e4 377
b31a1d8b 378 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
0bbaf069 379 dev->vlan_rx_register = gfar_vlan_rx_register;
1da177e4 380
0bbaf069 381 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
0bbaf069
KG
382 }
383
b31a1d8b 384 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
0bbaf069
KG
385 priv->extended_hash = 1;
386 priv->hash_width = 9;
387
388 priv->hash_regs[0] = &priv->regs->igaddr0;
389 priv->hash_regs[1] = &priv->regs->igaddr1;
390 priv->hash_regs[2] = &priv->regs->igaddr2;
391 priv->hash_regs[3] = &priv->regs->igaddr3;
392 priv->hash_regs[4] = &priv->regs->igaddr4;
393 priv->hash_regs[5] = &priv->regs->igaddr5;
394 priv->hash_regs[6] = &priv->regs->igaddr6;
395 priv->hash_regs[7] = &priv->regs->igaddr7;
396 priv->hash_regs[8] = &priv->regs->gaddr0;
397 priv->hash_regs[9] = &priv->regs->gaddr1;
398 priv->hash_regs[10] = &priv->regs->gaddr2;
399 priv->hash_regs[11] = &priv->regs->gaddr3;
400 priv->hash_regs[12] = &priv->regs->gaddr4;
401 priv->hash_regs[13] = &priv->regs->gaddr5;
402 priv->hash_regs[14] = &priv->regs->gaddr6;
403 priv->hash_regs[15] = &priv->regs->gaddr7;
404
405 } else {
406 priv->extended_hash = 0;
407 priv->hash_width = 8;
408
409 priv->hash_regs[0] = &priv->regs->gaddr0;
410 priv->hash_regs[1] = &priv->regs->gaddr1;
411 priv->hash_regs[2] = &priv->regs->gaddr2;
412 priv->hash_regs[3] = &priv->regs->gaddr3;
413 priv->hash_regs[4] = &priv->regs->gaddr4;
414 priv->hash_regs[5] = &priv->regs->gaddr5;
415 priv->hash_regs[6] = &priv->regs->gaddr6;
416 priv->hash_regs[7] = &priv->regs->gaddr7;
417 }
418
b31a1d8b 419 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
0bbaf069
KG
420 priv->padding = DEFAULT_PADDING;
421 else
422 priv->padding = 0;
423
0bbaf069
KG
424 if (dev->features & NETIF_F_IP_CSUM)
425 dev->hard_header_len += GMAC_FCB_LEN;
1da177e4
LT
426
427 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1da177e4
LT
428 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
429 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
4669bc90 430 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
1da177e4
LT
431
432 priv->txcoalescing = DEFAULT_TX_COALESCE;
b46a8454 433 priv->txic = DEFAULT_TXIC;
1da177e4 434 priv->rxcoalescing = DEFAULT_RX_COALESCE;
b46a8454 435 priv->rxic = DEFAULT_RXIC;
1da177e4 436
0bbaf069
KG
437 /* Enable most messages by default */
438 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
439
d3eab82b
TP
440 /* Carrier starts down, phylib will bring it up */
441 netif_carrier_off(dev);
442
1da177e4
LT
443 err = register_netdev(dev);
444
445 if (err) {
446 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
447 dev->name);
448 goto register_fail;
449 }
450
c50a5d9a
DH
451 /* fill out IRQ number and name fields */
452 len_devname = strlen(dev->name);
453 strncpy(&priv->int_name_tx[0], dev->name, len_devname);
454 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
455 strncpy(&priv->int_name_tx[len_devname],
456 "_tx", sizeof("_tx") + 1);
457
458 strncpy(&priv->int_name_rx[0], dev->name, len_devname);
459 strncpy(&priv->int_name_rx[len_devname],
460 "_rx", sizeof("_rx") + 1);
461
462 strncpy(&priv->int_name_er[0], dev->name, len_devname);
463 strncpy(&priv->int_name_er[len_devname],
464 "_er", sizeof("_er") + 1);
465 } else
466 priv->int_name_tx[len_devname] = '\0';
467
7f7f5316
AF
468 /* Create all the sysfs files */
469 gfar_init_sysfs(dev);
470
1da177e4 471 /* Print out the device info */
e174961c 472 printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
1da177e4
LT
473
474 /* Even more device info helps when determining which kernel */
7f7f5316 475 /* provided which set of benchmarks. */
1da177e4 476 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
1da177e4
LT
477 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
478 dev->name, priv->rx_ring_size, priv->tx_ring_size);
479
480 return 0;
481
482register_fail:
cc8c6e37 483 iounmap(priv->regs);
1da177e4
LT
484regs_fail:
485 free_netdev(dev);
bb40dcbb 486 return err;
1da177e4
LT
487}
488
b31a1d8b 489static int gfar_remove(struct of_device *ofdev)
1da177e4 490{
b31a1d8b 491 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
1da177e4 492
b31a1d8b 493 dev_set_drvdata(&ofdev->dev, NULL);
1da177e4 494
cc8c6e37 495 iounmap(priv->regs);
b31a1d8b 496 free_netdev(priv->dev);
1da177e4
LT
497
498 return 0;
499}
500
d87eb127 501#ifdef CONFIG_PM
b31a1d8b 502static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
d87eb127 503{
b31a1d8b
AF
504 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
505 struct net_device *dev = priv->dev;
d87eb127
SW
506 unsigned long flags;
507 u32 tempval;
508
509 int magic_packet = priv->wol_en &&
b31a1d8b 510 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
d87eb127
SW
511
512 netif_device_detach(dev);
513
514 if (netif_running(dev)) {
515 spin_lock_irqsave(&priv->txlock, flags);
516 spin_lock(&priv->rxlock);
517
518 gfar_halt_nodisable(dev);
519
520 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
521 tempval = gfar_read(&priv->regs->maccfg1);
522
523 tempval &= ~MACCFG1_TX_EN;
524
525 if (!magic_packet)
526 tempval &= ~MACCFG1_RX_EN;
527
528 gfar_write(&priv->regs->maccfg1, tempval);
529
530 spin_unlock(&priv->rxlock);
531 spin_unlock_irqrestore(&priv->txlock, flags);
532
d87eb127 533 napi_disable(&priv->napi);
d87eb127
SW
534
535 if (magic_packet) {
536 /* Enable interrupt on Magic Packet */
537 gfar_write(&priv->regs->imask, IMASK_MAG);
538
539 /* Enable Magic Packet mode */
540 tempval = gfar_read(&priv->regs->maccfg2);
541 tempval |= MACCFG2_MPEN;
542 gfar_write(&priv->regs->maccfg2, tempval);
543 } else {
544 phy_stop(priv->phydev);
545 }
546 }
547
548 return 0;
549}
550
b31a1d8b 551static int gfar_resume(struct of_device *ofdev)
d87eb127 552{
b31a1d8b
AF
553 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
554 struct net_device *dev = priv->dev;
d87eb127
SW
555 unsigned long flags;
556 u32 tempval;
557 int magic_packet = priv->wol_en &&
b31a1d8b 558 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
d87eb127
SW
559
560 if (!netif_running(dev)) {
561 netif_device_attach(dev);
562 return 0;
563 }
564
565 if (!magic_packet && priv->phydev)
566 phy_start(priv->phydev);
567
568 /* Disable Magic Packet mode, in case something
569 * else woke us up.
570 */
571
572 spin_lock_irqsave(&priv->txlock, flags);
573 spin_lock(&priv->rxlock);
574
575 tempval = gfar_read(&priv->regs->maccfg2);
576 tempval &= ~MACCFG2_MPEN;
577 gfar_write(&priv->regs->maccfg2, tempval);
578
579 gfar_start(dev);
580
581 spin_unlock(&priv->rxlock);
582 spin_unlock_irqrestore(&priv->txlock, flags);
583
584 netif_device_attach(dev);
585
d87eb127 586 napi_enable(&priv->napi);
d87eb127
SW
587
588 return 0;
589}
590#else
591#define gfar_suspend NULL
592#define gfar_resume NULL
593#endif
1da177e4 594
e8a2b6a4
AF
595/* Reads the controller's registers to determine what interface
596 * connects it to the PHY.
597 */
598static phy_interface_t gfar_get_interface(struct net_device *dev)
599{
600 struct gfar_private *priv = netdev_priv(dev);
601 u32 ecntrl = gfar_read(&priv->regs->ecntrl);
602
603 if (ecntrl & ECNTRL_SGMII_MODE)
604 return PHY_INTERFACE_MODE_SGMII;
605
606 if (ecntrl & ECNTRL_TBI_MODE) {
607 if (ecntrl & ECNTRL_REDUCED_MODE)
608 return PHY_INTERFACE_MODE_RTBI;
609 else
610 return PHY_INTERFACE_MODE_TBI;
611 }
612
613 if (ecntrl & ECNTRL_REDUCED_MODE) {
614 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
615 return PHY_INTERFACE_MODE_RMII;
7132ab7f 616 else {
b31a1d8b 617 phy_interface_t interface = priv->interface;
7132ab7f
AF
618
619 /*
620 * This isn't autodetected right now, so it must
621 * be set by the device tree or platform code.
622 */
623 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
624 return PHY_INTERFACE_MODE_RGMII_ID;
625
e8a2b6a4 626 return PHY_INTERFACE_MODE_RGMII;
7132ab7f 627 }
e8a2b6a4
AF
628 }
629
b31a1d8b 630 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
e8a2b6a4
AF
631 return PHY_INTERFACE_MODE_GMII;
632
633 return PHY_INTERFACE_MODE_MII;
634}
635
636
bb40dcbb
AF
637/* Initializes driver's PHY state, and attaches to the PHY.
638 * Returns 0 on success.
1da177e4
LT
639 */
640static int init_phy(struct net_device *dev)
641{
642 struct gfar_private *priv = netdev_priv(dev);
bb40dcbb 643 uint gigabit_support =
b31a1d8b 644 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
bb40dcbb
AF
645 SUPPORTED_1000baseT_Full : 0;
646 struct phy_device *phydev;
e8a2b6a4 647 phy_interface_t interface;
1da177e4
LT
648
649 priv->oldlink = 0;
650 priv->oldspeed = 0;
651 priv->oldduplex = -1;
652
e8a2b6a4
AF
653 interface = gfar_get_interface(dev);
654
b31a1d8b 655 phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);
1da177e4 656
d3c12873
KJ
657 if (interface == PHY_INTERFACE_MODE_SGMII)
658 gfar_configure_serdes(dev);
659
bb40dcbb
AF
660 if (IS_ERR(phydev)) {
661 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
662 return PTR_ERR(phydev);
1da177e4
LT
663 }
664
bb40dcbb
AF
665 /* Remove any features not supported by the controller */
666 phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
667 phydev->advertising = phydev->supported;
1da177e4 668
bb40dcbb 669 priv->phydev = phydev;
1da177e4
LT
670
671 return 0;
1da177e4
LT
672}
673
d0313587
PG
674/*
675 * Initialize TBI PHY interface for communicating with the
676 * SERDES lynx PHY on the chip. We communicate with this PHY
677 * through the MDIO bus on each controller, treating it as a
678 * "normal" PHY at the address found in the TBIPA register. We assume
679 * that the TBIPA register is valid. Either the MDIO bus code will set
680 * it to a value that doesn't conflict with other PHYs on the bus, or the
681 * value doesn't matter, as there are no other PHYs on the bus.
682 */
d3c12873
KJ
683static void gfar_configure_serdes(struct net_device *dev)
684{
685 struct gfar_private *priv = netdev_priv(dev);
c132419e 686
b31a1d8b
AF
687 if (!priv->tbiphy) {
688 printk(KERN_WARNING "SGMII mode requires that the device "
689 "tree specify a tbi-handle\n");
690 return;
691 }
d3c12873 692
b31a1d8b
AF
693 /*
694 * If the link is already up, we must already be ok, and don't need to
bdb59f94
TP
695 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
696 * everything for us? Resetting it takes the link down and requires
697 * several seconds for it to come back.
698 */
b31a1d8b
AF
699 if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
700 return;
d3c12873 701
d0313587 702 /* Single clk mode, mii mode off(for serdes communication) */
b31a1d8b 703 phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);
d3c12873 704
b31a1d8b 705 phy_write(priv->tbiphy, MII_ADVERTISE,
d3c12873
KJ
706 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
707 ADVERTISE_1000XPSE_ASYM);
708
b31a1d8b 709 phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
d3c12873
KJ
710 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
711}
712
1da177e4
LT
713static void init_registers(struct net_device *dev)
714{
715 struct gfar_private *priv = netdev_priv(dev);
716
717 /* Clear IEVENT */
718 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
719
720 /* Initialize IMASK */
721 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
722
723 /* Init hash registers to zero */
0bbaf069
KG
724 gfar_write(&priv->regs->igaddr0, 0);
725 gfar_write(&priv->regs->igaddr1, 0);
726 gfar_write(&priv->regs->igaddr2, 0);
727 gfar_write(&priv->regs->igaddr3, 0);
728 gfar_write(&priv->regs->igaddr4, 0);
729 gfar_write(&priv->regs->igaddr5, 0);
730 gfar_write(&priv->regs->igaddr6, 0);
731 gfar_write(&priv->regs->igaddr7, 0);
1da177e4
LT
732
733 gfar_write(&priv->regs->gaddr0, 0);
734 gfar_write(&priv->regs->gaddr1, 0);
735 gfar_write(&priv->regs->gaddr2, 0);
736 gfar_write(&priv->regs->gaddr3, 0);
737 gfar_write(&priv->regs->gaddr4, 0);
738 gfar_write(&priv->regs->gaddr5, 0);
739 gfar_write(&priv->regs->gaddr6, 0);
740 gfar_write(&priv->regs->gaddr7, 0);
741
1da177e4 742 /* Zero out the rmon mib registers if it has them */
b31a1d8b 743 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
cc8c6e37 744 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
1da177e4
LT
745
746 /* Mask off the CAM interrupts */
747 gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
748 gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
749 }
750
751 /* Initialize the max receive buffer length */
752 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
753
1da177e4
LT
754 /* Initialize the Minimum Frame Length Register */
755 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
1da177e4
LT
756}
757
0bbaf069
KG
758
759/* Halt the receive and transmit queues */
d87eb127 760static void gfar_halt_nodisable(struct net_device *dev)
1da177e4
LT
761{
762 struct gfar_private *priv = netdev_priv(dev);
cc8c6e37 763 struct gfar __iomem *regs = priv->regs;
1da177e4
LT
764 u32 tempval;
765
1da177e4
LT
766 /* Mask all interrupts */
767 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
768
769 /* Clear all interrupts */
770 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
771
772 /* Stop the DMA, and wait for it to stop */
773 tempval = gfar_read(&priv->regs->dmactrl);
774 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
775 != (DMACTRL_GRS | DMACTRL_GTS)) {
776 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
777 gfar_write(&priv->regs->dmactrl, tempval);
778
779 while (!(gfar_read(&priv->regs->ievent) &
780 (IEVENT_GRSC | IEVENT_GTSC)))
781 cpu_relax();
782 }
d87eb127 783}
d87eb127
SW
784
785/* Halt the receive and transmit queues */
786void gfar_halt(struct net_device *dev)
787{
788 struct gfar_private *priv = netdev_priv(dev);
789 struct gfar __iomem *regs = priv->regs;
790 u32 tempval;
1da177e4 791
2a54adc3
SW
792 gfar_halt_nodisable(dev);
793
1da177e4
LT
794 /* Disable Rx and Tx */
795 tempval = gfar_read(&regs->maccfg1);
796 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
797 gfar_write(&regs->maccfg1, tempval);
0bbaf069
KG
798}
799
800void stop_gfar(struct net_device *dev)
801{
802 struct gfar_private *priv = netdev_priv(dev);
cc8c6e37 803 struct gfar __iomem *regs = priv->regs;
0bbaf069
KG
804 unsigned long flags;
805
bb40dcbb
AF
806 phy_stop(priv->phydev);
807
0bbaf069 808 /* Lock it down */
fef6108d
AF
809 spin_lock_irqsave(&priv->txlock, flags);
810 spin_lock(&priv->rxlock);
0bbaf069 811
0bbaf069 812 gfar_halt(dev);
1da177e4 813
fef6108d
AF
814 spin_unlock(&priv->rxlock);
815 spin_unlock_irqrestore(&priv->txlock, flags);
1da177e4
LT
816
817 /* Free the IRQs */
b31a1d8b 818 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1da177e4
LT
819 free_irq(priv->interruptError, dev);
820 free_irq(priv->interruptTransmit, dev);
821 free_irq(priv->interruptReceive, dev);
822 } else {
bb40dcbb 823 free_irq(priv->interruptTransmit, dev);
1da177e4
LT
824 }
825
826 free_skb_resources(priv);
827
cf782298 828 dma_free_coherent(&dev->dev,
1da177e4
LT
829 sizeof(struct txbd8)*priv->tx_ring_size
830 + sizeof(struct rxbd8)*priv->rx_ring_size,
831 priv->tx_bd_base,
0bbaf069 832 gfar_read(&regs->tbase0));
1da177e4
LT
833}
834
835/* If there are any tx skbs or rx skbs still around, free them.
836 * Then free tx_skbuff and rx_skbuff */
bb40dcbb 837static void free_skb_resources(struct gfar_private *priv)
1da177e4
LT
838{
839 struct rxbd8 *rxbdp;
840 struct txbd8 *txbdp;
4669bc90 841 int i, j;
1da177e4
LT
842
843 /* Go through all the buffer descriptors and free their data buffers */
844 txbdp = priv->tx_bd_base;
845
846 for (i = 0; i < priv->tx_ring_size; i++) {
4669bc90
DH
847 if (!priv->tx_skbuff[i])
848 continue;
1da177e4 849
4669bc90
DH
850 dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
851 txbdp->length, DMA_TO_DEVICE);
852 txbdp->lstatus = 0;
853 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
854 txbdp++;
855 dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
856 txbdp->length, DMA_TO_DEVICE);
1da177e4 857 }
ad5da7ab 858 txbdp++;
4669bc90
DH
859 dev_kfree_skb_any(priv->tx_skbuff[i]);
860 priv->tx_skbuff[i] = NULL;
1da177e4
LT
861 }
862
863 kfree(priv->tx_skbuff);
864
865 rxbdp = priv->rx_bd_base;
866
867 /* rx_skbuff is not guaranteed to be allocated, so only
868 * free it and its contents if it is allocated */
869 if(priv->rx_skbuff != NULL) {
870 for (i = 0; i < priv->rx_ring_size; i++) {
871 if (priv->rx_skbuff[i]) {
cf782298 872 dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
7f7f5316 873 priv->rx_buffer_size,
1da177e4
LT
874 DMA_FROM_DEVICE);
875
876 dev_kfree_skb_any(priv->rx_skbuff[i]);
877 priv->rx_skbuff[i] = NULL;
878 }
879
5a5efed4 880 rxbdp->lstatus = 0;
1da177e4
LT
881 rxbdp->bufPtr = 0;
882
883 rxbdp++;
884 }
885
886 kfree(priv->rx_skbuff);
887 }
888}
889
0bbaf069
KG
890void gfar_start(struct net_device *dev)
891{
892 struct gfar_private *priv = netdev_priv(dev);
cc8c6e37 893 struct gfar __iomem *regs = priv->regs;
0bbaf069
KG
894 u32 tempval;
895
896 /* Enable Rx and Tx in MACCFG1 */
897 tempval = gfar_read(&regs->maccfg1);
898 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
899 gfar_write(&regs->maccfg1, tempval);
900
901 /* Initialize DMACTRL to have WWR and WOP */
902 tempval = gfar_read(&priv->regs->dmactrl);
903 tempval |= DMACTRL_INIT_SETTINGS;
904 gfar_write(&priv->regs->dmactrl, tempval);
905
0bbaf069
KG
906 /* Make sure we aren't stopped */
907 tempval = gfar_read(&priv->regs->dmactrl);
908 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
909 gfar_write(&priv->regs->dmactrl, tempval);
910
fef6108d
AF
911 /* Clear THLT/RHLT, so that the DMA starts polling now */
912 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
913 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
914
0bbaf069
KG
915 /* Unmask the interrupts we look for */
916 gfar_write(&regs->imask, IMASK_DEFAULT);
12dea57b
DH
917
918 dev->trans_start = jiffies;
0bbaf069
KG
919}
920
1da177e4
LT
921/* Bring the controller up and running */
922int startup_gfar(struct net_device *dev)
923{
924 struct txbd8 *txbdp;
925 struct rxbd8 *rxbdp;
f9663aea 926 dma_addr_t addr = 0;
1da177e4
LT
927 unsigned long vaddr;
928 int i;
929 struct gfar_private *priv = netdev_priv(dev);
cc8c6e37 930 struct gfar __iomem *regs = priv->regs;
1da177e4 931 int err = 0;
0bbaf069 932 u32 rctrl = 0;
7f7f5316 933 u32 attrs = 0;
1da177e4
LT
934
935 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
936
937 /* Allocate memory for the buffer descriptors */
cf782298 938 vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
1da177e4
LT
939 sizeof (struct txbd8) * priv->tx_ring_size +
940 sizeof (struct rxbd8) * priv->rx_ring_size,
941 &addr, GFP_KERNEL);
942
943 if (vaddr == 0) {
0bbaf069
KG
944 if (netif_msg_ifup(priv))
945 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
946 dev->name);
1da177e4
LT
947 return -ENOMEM;
948 }
949
950 priv->tx_bd_base = (struct txbd8 *) vaddr;
951
952 /* enet DMA only understands physical addresses */
0bbaf069 953 gfar_write(&regs->tbase0, addr);
1da177e4
LT
954
955 /* Start the rx descriptor ring where the tx ring leaves off */
956 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
957 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
958 priv->rx_bd_base = (struct rxbd8 *) vaddr;
0bbaf069 959 gfar_write(&regs->rbase0, addr);
1da177e4
LT
960
961 /* Setup the skbuff rings */
962 priv->tx_skbuff =
963 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
964 priv->tx_ring_size, GFP_KERNEL);
965
bb40dcbb 966 if (NULL == priv->tx_skbuff) {
0bbaf069
KG
967 if (netif_msg_ifup(priv))
968 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
969 dev->name);
1da177e4
LT
970 err = -ENOMEM;
971 goto tx_skb_fail;
972 }
973
974 for (i = 0; i < priv->tx_ring_size; i++)
975 priv->tx_skbuff[i] = NULL;
976
977 priv->rx_skbuff =
978 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
979 priv->rx_ring_size, GFP_KERNEL);
980
bb40dcbb 981 if (NULL == priv->rx_skbuff) {
0bbaf069
KG
982 if (netif_msg_ifup(priv))
983 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
984 dev->name);
1da177e4
LT
985 err = -ENOMEM;
986 goto rx_skb_fail;
987 }
988
989 for (i = 0; i < priv->rx_ring_size; i++)
990 priv->rx_skbuff[i] = NULL;
991
992 /* Initialize some variables in our dev structure */
4669bc90 993 priv->num_txbdfree = priv->tx_ring_size;
1da177e4
LT
994 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
995 priv->cur_rx = priv->rx_bd_base;
996 priv->skb_curtx = priv->skb_dirtytx = 0;
997 priv->skb_currx = 0;
998
999 /* Initialize Transmit Descriptor Ring */
1000 txbdp = priv->tx_bd_base;
1001 for (i = 0; i < priv->tx_ring_size; i++) {
5a5efed4 1002 txbdp->lstatus = 0;
1da177e4
LT
1003 txbdp->bufPtr = 0;
1004 txbdp++;
1005 }
1006
1007 /* Set the last descriptor in the ring to indicate wrap */
1008 txbdp--;
1009 txbdp->status |= TXBD_WRAP;
1010
1011 rxbdp = priv->rx_bd_base;
1012 for (i = 0; i < priv->rx_ring_size; i++) {
815b97c6 1013 struct sk_buff *skb;
1da177e4 1014
815b97c6 1015 skb = gfar_new_skb(dev);
1da177e4 1016
815b97c6
AF
1017 if (!skb) {
1018 printk(KERN_ERR "%s: Can't allocate RX buffers\n",
1019 dev->name);
1020
1021 goto err_rxalloc_fail;
1022 }
1da177e4
LT
1023
1024 priv->rx_skbuff[i] = skb;
1025
815b97c6
AF
1026 gfar_new_rxbdp(dev, rxbdp, skb);
1027
1da177e4
LT
1028 rxbdp++;
1029 }
1030
1031 /* Set the last descriptor in the ring to wrap */
1032 rxbdp--;
1033 rxbdp->status |= RXBD_WRAP;
1034
1035 /* If the device has multiple interrupts, register for
1036 * them. Otherwise, only register for the one */
b31a1d8b 1037 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
0bbaf069 1038 /* Install our interrupt handlers for Error,
1da177e4
LT
1039 * Transmit, and Receive */
1040 if (request_irq(priv->interruptError, gfar_error,
c50a5d9a 1041 0, priv->int_name_er, dev) < 0) {
0bbaf069
KG
1042 if (netif_msg_intr(priv))
1043 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1044 dev->name, priv->interruptError);
1da177e4
LT
1045
1046 err = -1;
1047 goto err_irq_fail;
1048 }
1049
1050 if (request_irq(priv->interruptTransmit, gfar_transmit,
c50a5d9a 1051 0, priv->int_name_tx, dev) < 0) {
0bbaf069
KG
1052 if (netif_msg_intr(priv))
1053 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1054 dev->name, priv->interruptTransmit);
1da177e4
LT
1055
1056 err = -1;
1057
1058 goto tx_irq_fail;
1059 }
1060
1061 if (request_irq(priv->interruptReceive, gfar_receive,
c50a5d9a 1062 0, priv->int_name_rx, dev) < 0) {
0bbaf069
KG
1063 if (netif_msg_intr(priv))
1064 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
1065 dev->name, priv->interruptReceive);
1da177e4
LT
1066
1067 err = -1;
1068 goto rx_irq_fail;
1069 }
1070 } else {
1071 if (request_irq(priv->interruptTransmit, gfar_interrupt,
c50a5d9a 1072 0, priv->int_name_tx, dev) < 0) {
0bbaf069
KG
1073 if (netif_msg_intr(priv))
1074 printk(KERN_ERR "%s: Can't get IRQ %d\n",
c50a5d9a 1075 dev->name, priv->interruptTransmit);
1da177e4
LT
1076
1077 err = -1;
1078 goto err_irq_fail;
1079 }
1080 }
1081
bb40dcbb 1082 phy_start(priv->phydev);
1da177e4
LT
1083
1084 /* Configure the coalescing support */
b46a8454 1085 gfar_write(&regs->txic, 0);
1da177e4 1086 if (priv->txcoalescing)
b46a8454 1087 gfar_write(&regs->txic, priv->txic);
1da177e4 1088
b46a8454 1089 gfar_write(&regs->rxic, 0);
1da177e4 1090 if (priv->rxcoalescing)
b46a8454 1091 gfar_write(&regs->rxic, priv->rxic);
1da177e4 1092
0bbaf069
KG
1093 if (priv->rx_csum_enable)
1094 rctrl |= RCTRL_CHECKSUMMING;
1da177e4 1095
7f7f5316 1096 if (priv->extended_hash) {
0bbaf069 1097 rctrl |= RCTRL_EXTHASH;
1da177e4 1098
7f7f5316
AF
1099 gfar_clear_exact_match(dev);
1100 rctrl |= RCTRL_EMEN;
1101 }
1102
7f7f5316
AF
1103 if (priv->padding) {
1104 rctrl &= ~RCTRL_PAL_MASK;
1105 rctrl |= RCTRL_PADDING(priv->padding);
1106 }
1107
0bbaf069
KG
1108 /* Init rctrl based on our settings */
1109 gfar_write(&priv->regs->rctrl, rctrl);
1da177e4 1110
0bbaf069
KG
1111 if (dev->features & NETIF_F_IP_CSUM)
1112 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
1da177e4 1113
7f7f5316
AF
1114 /* Set the extraction length and index */
1115 attrs = ATTRELI_EL(priv->rx_stash_size) |
1116 ATTRELI_EI(priv->rx_stash_index);
1117
1118 gfar_write(&priv->regs->attreli, attrs);
1119
1120 /* Start with defaults, and add stashing or locking
1121 * depending on the approprate variables */
1122 attrs = ATTR_INIT_SETTINGS;
1123
1124 if (priv->bd_stash_en)
1125 attrs |= ATTR_BDSTASH;
1126
1127 if (priv->rx_stash_size != 0)
1128 attrs |= ATTR_BUFSTASH;
1129
1130 gfar_write(&priv->regs->attr, attrs);
1131
1132 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
1133 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
1134 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
1135
1136 /* Start the controller */
0bbaf069 1137 gfar_start(dev);
1da177e4
LT
1138
1139 return 0;
1140
1141rx_irq_fail:
1142 free_irq(priv->interruptTransmit, dev);
1143tx_irq_fail:
1144 free_irq(priv->interruptError, dev);
1145err_irq_fail:
7d2e3cb7 1146err_rxalloc_fail:
1da177e4
LT
1147rx_skb_fail:
1148 free_skb_resources(priv);
1149tx_skb_fail:
cf782298 1150 dma_free_coherent(&dev->dev,
1da177e4
LT
1151 sizeof(struct txbd8)*priv->tx_ring_size
1152 + sizeof(struct rxbd8)*priv->rx_ring_size,
1153 priv->tx_bd_base,
0bbaf069 1154 gfar_read(&regs->tbase0));
1da177e4 1155
1da177e4
LT
1156 return err;
1157}
1158
1159/* Called when something needs to use the ethernet device */
1160/* Returns 0 for success. */
1161static int gfar_enet_open(struct net_device *dev)
1162{
94e8cc35 1163 struct gfar_private *priv = netdev_priv(dev);
1da177e4
LT
1164 int err;
1165
bea3348e
SH
1166 napi_enable(&priv->napi);
1167
1da177e4
LT
1168 /* Initialize a bunch of registers */
1169 init_registers(dev);
1170
1171 gfar_set_mac_address(dev);
1172
1173 err = init_phy(dev);
1174
bea3348e
SH
1175 if(err) {
1176 napi_disable(&priv->napi);
1da177e4 1177 return err;
bea3348e 1178 }
1da177e4
LT
1179
1180 err = startup_gfar(dev);
db0e8e3f 1181 if (err) {
bea3348e 1182 napi_disable(&priv->napi);
db0e8e3f
AV
1183 return err;
1184 }
1da177e4
LT
1185
1186 netif_start_queue(dev);
1187
1188 return err;
1189}
1190
a22823e7 1191static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
0bbaf069
KG
1192{
1193 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
1194
a22823e7 1195 cacheable_memzero(fcb, GMAC_FCB_LEN);
0bbaf069 1196
0bbaf069
KG
1197 return fcb;
1198}
1199
1200static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1201{
7f7f5316 1202 u8 flags = 0;
0bbaf069
KG
1203
1204 /* If we're here, it's a IP packet with a TCP or UDP
1205 * payload. We set it to checksum, using a pseudo-header
1206 * we provide
1207 */
7f7f5316 1208 flags = TXFCB_DEFAULT;
0bbaf069 1209
7f7f5316
AF
1210 /* Tell the controller what the protocol is */
1211 /* And provide the already calculated phcs */
eddc9ec5 1212 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
7f7f5316 1213 flags |= TXFCB_UDP;
4bedb452 1214 fcb->phcs = udp_hdr(skb)->check;
7f7f5316 1215 } else
8da32de5 1216 fcb->phcs = tcp_hdr(skb)->check;
0bbaf069
KG
1217
1218 /* l3os is the distance between the start of the
1219 * frame (skb->data) and the start of the IP hdr.
1220 * l4os is the distance between the start of the
1221 * l3 hdr and the l4 hdr */
bbe735e4 1222 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
cfe1fc77 1223 fcb->l4os = skb_network_header_len(skb);
0bbaf069 1224
7f7f5316 1225 fcb->flags = flags;
0bbaf069
KG
1226}
1227
7f7f5316 1228void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
0bbaf069 1229{
7f7f5316 1230 fcb->flags |= TXFCB_VLN;
0bbaf069
KG
1231 fcb->vlctl = vlan_tx_tag_get(skb);
1232}
1233
4669bc90
DH
1234static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1235 struct txbd8 *base, int ring_size)
1236{
1237 struct txbd8 *new_bd = bdp + stride;
1238
1239 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1240}
1241
1242static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1243 int ring_size)
1244{
1245 return skip_txbd(bdp, 1, base, ring_size);
1246}
1247
1da177e4
LT
1248/* This is called by the kernel when a frame is ready for transmission. */
1249/* It is pointed to by the dev->hard_start_xmit function pointer */
1250static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1251{
1252 struct gfar_private *priv = netdev_priv(dev);
0bbaf069 1253 struct txfcb *fcb = NULL;
4669bc90 1254 struct txbd8 *txbdp, *txbdp_start, *base;
5a5efed4 1255 u32 lstatus;
4669bc90
DH
1256 int i;
1257 u32 bufaddr;
fef6108d 1258 unsigned long flags;
4669bc90
DH
1259 unsigned int nr_frags, length;
1260
1261 base = priv->tx_bd_base;
1262
1263 /* total number of fragments in the SKB */
1264 nr_frags = skb_shinfo(skb)->nr_frags;
1265
1266 spin_lock_irqsave(&priv->txlock, flags);
1267
1268 /* check if there is space to queue this packet */
1269 if (nr_frags > priv->num_txbdfree) {
1270 /* no space, stop the queue */
1271 netif_stop_queue(dev);
1272 dev->stats.tx_fifo_errors++;
1273 spin_unlock_irqrestore(&priv->txlock, flags);
1274 return NETDEV_TX_BUSY;
1275 }
1da177e4
LT
1276
1277 /* Update transmit stats */
09f75cd7 1278 dev->stats.tx_bytes += skb->len;
1da177e4 1279
4669bc90 1280 txbdp = txbdp_start = priv->cur_tx;
1da177e4 1281
4669bc90
DH
1282 if (nr_frags == 0) {
1283 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1284 } else {
1285 /* Place the fragment addresses and lengths into the TxBDs */
1286 for (i = 0; i < nr_frags; i++) {
1287 /* Point at the next BD, wrapping as needed */
1288 txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
1289
1290 length = skb_shinfo(skb)->frags[i].size;
1291
1292 lstatus = txbdp->lstatus | length |
1293 BD_LFLAG(TXBD_READY);
1294
1295 /* Handle the last BD specially */
1296 if (i == nr_frags - 1)
1297 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1da177e4 1298
4669bc90
DH
1299 bufaddr = dma_map_page(&dev->dev,
1300 skb_shinfo(skb)->frags[i].page,
1301 skb_shinfo(skb)->frags[i].page_offset,
1302 length,
1303 DMA_TO_DEVICE);
1304
1305 /* set the TxBD length and buffer pointer */
1306 txbdp->bufPtr = bufaddr;
1307 txbdp->lstatus = lstatus;
1308 }
1309
1310 lstatus = txbdp_start->lstatus;
1311 }
1da177e4 1312
0bbaf069 1313 /* Set up checksumming */
12dea57b 1314 if (CHECKSUM_PARTIAL == skb->ip_summed) {
a22823e7 1315 fcb = gfar_add_fcb(skb);
5a5efed4 1316 lstatus |= BD_LFLAG(TXBD_TOE);
0bbaf069
KG
1317 gfar_tx_checksum(skb, fcb);
1318 }
1319
77ecaf2d 1320 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
7f7f5316 1321 if (unlikely(NULL == fcb)) {
a22823e7 1322 fcb = gfar_add_fcb(skb);
5a5efed4 1323 lstatus |= BD_LFLAG(TXBD_TOE);
7f7f5316 1324 }
0bbaf069
KG
1325
1326 gfar_tx_vlan(skb, fcb);
1327 }
1328
4669bc90 1329 /* setup the TxBD length and buffer pointer for the first BD */
1da177e4 1330 priv->tx_skbuff[priv->skb_curtx] = skb;
4669bc90
DH
1331 txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
1332 skb_headlen(skb), DMA_TO_DEVICE);
1da177e4 1333
4669bc90 1334 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1da177e4 1335
4669bc90
DH
1336 /*
1337 * The powerpc-specific eieio() is used, as wmb() has too strong
3b6330ce
SW
1338 * semantics (it requires synchronization between cacheable and
1339 * uncacheable mappings, which eieio doesn't provide and which we
1340 * don't need), thus requiring a more expensive sync instruction. At
1341 * some point, the set of architecture-independent barrier functions
1342 * should be expanded to include weaker barriers.
1343 */
3b6330ce 1344 eieio();
7f7f5316 1345
4669bc90
DH
1346 txbdp_start->lstatus = lstatus;
1347
1348 /* Update the current skb pointer to the next entry we will use
1349 * (wrapping if necessary) */
1350 priv->skb_curtx = (priv->skb_curtx + 1) &
1351 TX_RING_MOD_MASK(priv->tx_ring_size);
1352
1353 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
1354
1355 /* reduce TxBD free count */
1356 priv->num_txbdfree -= (nr_frags + 1);
1357
1358 dev->trans_start = jiffies;
1da177e4
LT
1359
1360 /* If the next BD still needs to be cleaned up, then the bds
1361 are full. We need to tell the kernel to stop sending us stuff. */
4669bc90 1362 if (!priv->num_txbdfree) {
1da177e4
LT
1363 netif_stop_queue(dev);
1364
09f75cd7 1365 dev->stats.tx_fifo_errors++;
1da177e4
LT
1366 }
1367
1da177e4
LT
1368 /* Tell the DMA to go go go */
1369 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1370
1371 /* Unlock priv */
fef6108d 1372 spin_unlock_irqrestore(&priv->txlock, flags);
1da177e4
LT
1373
1374 return 0;
1375}
1376
1377/* Stops the kernel queue, and halts the controller */
1378static int gfar_close(struct net_device *dev)
1379{
1380 struct gfar_private *priv = netdev_priv(dev);
bea3348e
SH
1381
1382 napi_disable(&priv->napi);
1383
ab939905 1384 cancel_work_sync(&priv->reset_task);
1da177e4
LT
1385 stop_gfar(dev);
1386
bb40dcbb
AF
1387 /* Disconnect from the PHY */
1388 phy_disconnect(priv->phydev);
1389 priv->phydev = NULL;
1da177e4
LT
1390
1391 netif_stop_queue(dev);
1392
1393 return 0;
1394}
1395
1da177e4 1396/* Changes the mac address if the controller is not running. */
f162b9d5 1397static int gfar_set_mac_address(struct net_device *dev)
1da177e4 1398{
7f7f5316 1399 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1da177e4
LT
1400
1401 return 0;
1402}
1403
1404
0bbaf069
KG
1405/* Enables and disables VLAN insertion/extraction */
1406static void gfar_vlan_rx_register(struct net_device *dev,
1407 struct vlan_group *grp)
1408{
1409 struct gfar_private *priv = netdev_priv(dev);
1410 unsigned long flags;
77ecaf2d 1411 struct vlan_group *old_grp;
0bbaf069
KG
1412 u32 tempval;
1413
fef6108d 1414 spin_lock_irqsave(&priv->rxlock, flags);
0bbaf069 1415
77ecaf2d
DH
1416 old_grp = priv->vlgrp;
1417
1418 if (old_grp == grp)
1419 return;
0bbaf069
KG
1420
1421 if (grp) {
1422 /* Enable VLAN tag insertion */
1423 tempval = gfar_read(&priv->regs->tctrl);
1424 tempval |= TCTRL_VLINS;
1425
1426 gfar_write(&priv->regs->tctrl, tempval);
6aa20a22 1427
0bbaf069
KG
1428 /* Enable VLAN tag extraction */
1429 tempval = gfar_read(&priv->regs->rctrl);
1430 tempval |= RCTRL_VLEX;
77ecaf2d 1431 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
0bbaf069
KG
1432 gfar_write(&priv->regs->rctrl, tempval);
1433 } else {
1434 /* Disable VLAN tag insertion */
1435 tempval = gfar_read(&priv->regs->tctrl);
1436 tempval &= ~TCTRL_VLINS;
1437 gfar_write(&priv->regs->tctrl, tempval);
1438
1439 /* Disable VLAN tag extraction */
1440 tempval = gfar_read(&priv->regs->rctrl);
1441 tempval &= ~RCTRL_VLEX;
77ecaf2d
DH
1442 /* If parse is no longer required, then disable parser */
1443 if (tempval & RCTRL_REQ_PARSER)
1444 tempval |= RCTRL_PRSDEP_INIT;
1445 else
1446 tempval &= ~RCTRL_PRSDEP_INIT;
0bbaf069
KG
1447 gfar_write(&priv->regs->rctrl, tempval);
1448 }
1449
77ecaf2d
DH
1450 gfar_change_mtu(dev, dev->mtu);
1451
fef6108d 1452 spin_unlock_irqrestore(&priv->rxlock, flags);
0bbaf069
KG
1453}
1454
1da177e4
LT
1455static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1456{
1457 int tempsize, tempval;
1458 struct gfar_private *priv = netdev_priv(dev);
1459 int oldsize = priv->rx_buffer_size;
0bbaf069
KG
1460 int frame_size = new_mtu + ETH_HLEN;
1461
77ecaf2d 1462 if (priv->vlgrp)
faa89577 1463 frame_size += VLAN_HLEN;
0bbaf069 1464
1da177e4 1465 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
0bbaf069
KG
1466 if (netif_msg_drv(priv))
1467 printk(KERN_ERR "%s: Invalid MTU setting\n",
1468 dev->name);
1da177e4
LT
1469 return -EINVAL;
1470 }
1471
77ecaf2d
DH
1472 if (gfar_uses_fcb(priv))
1473 frame_size += GMAC_FCB_LEN;
1474
1475 frame_size += priv->padding;
1476
1da177e4
LT
1477 tempsize =
1478 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1479 INCREMENTAL_BUFFER_SIZE;
1480
1481 /* Only stop and start the controller if it isn't already
7f7f5316 1482 * stopped, and we changed something */
1da177e4
LT
1483 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1484 stop_gfar(dev);
1485
1486 priv->rx_buffer_size = tempsize;
1487
1488 dev->mtu = new_mtu;
1489
1490 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1491 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1492
1493 /* If the mtu is larger than the max size for standard
1494 * ethernet frames (ie, a jumbo frame), then set maccfg2
1495 * to allow huge frames, and to check the length */
1496 tempval = gfar_read(&priv->regs->maccfg2);
1497
1498 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1499 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1500 else
1501 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1502
1503 gfar_write(&priv->regs->maccfg2, tempval);
1504
1505 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1506 startup_gfar(dev);
1507
1508 return 0;
1509}
1510
ab939905 1511/* gfar_reset_task gets scheduled when a packet has not been
1da177e4
LT
1512 * transmitted after a set amount of time.
1513 * For now, assume that clearing out all the structures, and
ab939905
SS
1514 * starting over will fix the problem.
1515 */
1516static void gfar_reset_task(struct work_struct *work)
1da177e4 1517{
ab939905
SS
1518 struct gfar_private *priv = container_of(work, struct gfar_private,
1519 reset_task);
1520 struct net_device *dev = priv->dev;
1da177e4
LT
1521
1522 if (dev->flags & IFF_UP) {
1523 stop_gfar(dev);
1524 startup_gfar(dev);
1525 }
1526
263ba320 1527 netif_tx_schedule_all(dev);
1da177e4
LT
1528}
1529
ab939905
SS
1530static void gfar_timeout(struct net_device *dev)
1531{
1532 struct gfar_private *priv = netdev_priv(dev);
1533
1534 dev->stats.tx_errors++;
1535 schedule_work(&priv->reset_task);
1536}
1537
1da177e4 1538/* Interrupt Handler for Transmit complete */
f162b9d5 1539static int gfar_clean_tx_ring(struct net_device *dev)
1da177e4 1540{
d080cd63 1541 struct gfar_private *priv = netdev_priv(dev);
4669bc90
DH
1542 struct txbd8 *bdp;
1543 struct txbd8 *lbdp = NULL;
1544 struct txbd8 *base = priv->tx_bd_base;
1545 struct sk_buff *skb;
1546 int skb_dirtytx;
1547 int tx_ring_size = priv->tx_ring_size;
1548 int frags = 0;
1549 int i;
d080cd63 1550 int howmany = 0;
4669bc90 1551 u32 lstatus;
1da177e4 1552
1da177e4 1553 bdp = priv->dirty_tx;
4669bc90 1554 skb_dirtytx = priv->skb_dirtytx;
1da177e4 1555
4669bc90
DH
1556 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1557 frags = skb_shinfo(skb)->nr_frags;
1558 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1da177e4 1559
4669bc90 1560 lstatus = lbdp->lstatus;
1da177e4 1561
4669bc90
DH
1562 /* Only clean completed frames */
1563 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
1564 (lstatus & BD_LENGTH_MASK))
1565 break;
1566
1567 dma_unmap_single(&dev->dev,
1568 bdp->bufPtr,
1569 bdp->length,
1570 DMA_TO_DEVICE);
81183059 1571
4669bc90
DH
1572 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1573 bdp = next_txbd(bdp, base, tx_ring_size);
d080cd63 1574
4669bc90
DH
1575 for (i = 0; i < frags; i++) {
1576 dma_unmap_page(&dev->dev,
1577 bdp->bufPtr,
1578 bdp->length,
1579 DMA_TO_DEVICE);
1580 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1581 bdp = next_txbd(bdp, base, tx_ring_size);
1582 }
1da177e4 1583
4669bc90
DH
1584 dev_kfree_skb_any(skb);
1585 priv->tx_skbuff[skb_dirtytx] = NULL;
d080cd63 1586
4669bc90
DH
1587 skb_dirtytx = (skb_dirtytx + 1) &
1588 TX_RING_MOD_MASK(tx_ring_size);
1589
1590 howmany++;
1591 priv->num_txbdfree += frags + 1;
1592 }
1da177e4 1593
4669bc90
DH
1594 /* If we freed a buffer, we can restart transmission, if necessary */
1595 if (netif_queue_stopped(dev) && priv->num_txbdfree)
1596 netif_wake_queue(dev);
1da177e4 1597
4669bc90
DH
1598 /* Update dirty indicators */
1599 priv->skb_dirtytx = skb_dirtytx;
1600 priv->dirty_tx = bdp;
1da177e4 1601
d080cd63
DH
1602 dev->stats.tx_packets += howmany;
1603
1604 return howmany;
1605}
1606
8c7396ae 1607static void gfar_schedule_cleanup(struct net_device *dev)
d080cd63 1608{
d080cd63 1609 struct gfar_private *priv = netdev_priv(dev);
908a7a16 1610 if (netif_rx_schedule_prep(&priv->napi)) {
8c7396ae 1611 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
908a7a16 1612 __netif_rx_schedule(&priv->napi);
2f448911 1613 }
8c7396ae 1614}
1da177e4 1615
8c7396ae
DH
1616/* Interrupt Handler for Transmit complete */
1617static irqreturn_t gfar_transmit(int irq, void *dev_id)
1618{
1619 gfar_schedule_cleanup((struct net_device *)dev_id);
1da177e4
LT
1620 return IRQ_HANDLED;
1621}
1622
815b97c6
AF
1623static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1624 struct sk_buff *skb)
1625{
1626 struct gfar_private *priv = netdev_priv(dev);
5a5efed4 1627 u32 lstatus;
815b97c6
AF
1628
1629 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1630 priv->rx_buffer_size, DMA_FROM_DEVICE);
1631
5a5efed4 1632 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
815b97c6
AF
1633
1634 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
5a5efed4 1635 lstatus |= BD_LFLAG(RXBD_WRAP);
815b97c6
AF
1636
1637 eieio();
1638
5a5efed4 1639 bdp->lstatus = lstatus;
815b97c6
AF
1640}
1641
1642
1643struct sk_buff * gfar_new_skb(struct net_device *dev)
1da177e4 1644{
7f7f5316 1645 unsigned int alignamount;
1da177e4
LT
1646 struct gfar_private *priv = netdev_priv(dev);
1647 struct sk_buff *skb = NULL;
1da177e4
LT
1648
1649 /* We have to allocate the skb, so keep trying till we succeed */
815b97c6 1650 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
1da177e4 1651
815b97c6 1652 if (!skb)
1da177e4
LT
1653 return NULL;
1654
7f7f5316 1655 alignamount = RXBUF_ALIGNMENT -
bea3348e 1656 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
7f7f5316 1657
1da177e4
LT
1658 /* We need the data buffer to be aligned properly. We will reserve
1659 * as many bytes as needed to align the data properly
1660 */
7f7f5316 1661 skb_reserve(skb, alignamount);
1da177e4 1662
1da177e4
LT
1663 return skb;
1664}
1665
298e1a9e 1666static inline void count_errors(unsigned short status, struct net_device *dev)
1da177e4 1667{
298e1a9e 1668 struct gfar_private *priv = netdev_priv(dev);
09f75cd7 1669 struct net_device_stats *stats = &dev->stats;
1da177e4
LT
1670 struct gfar_extra_stats *estats = &priv->extra_stats;
1671
1672 /* If the packet was truncated, none of the other errors
1673 * matter */
1674 if (status & RXBD_TRUNCATED) {
1675 stats->rx_length_errors++;
1676
1677 estats->rx_trunc++;
1678
1679 return;
1680 }
1681 /* Count the errors, if there were any */
1682 if (status & (RXBD_LARGE | RXBD_SHORT)) {
1683 stats->rx_length_errors++;
1684
1685 if (status & RXBD_LARGE)
1686 estats->rx_large++;
1687 else
1688 estats->rx_short++;
1689 }
1690 if (status & RXBD_NONOCTET) {
1691 stats->rx_frame_errors++;
1692 estats->rx_nonoctet++;
1693 }
1694 if (status & RXBD_CRCERR) {
1695 estats->rx_crcerr++;
1696 stats->rx_crc_errors++;
1697 }
1698 if (status & RXBD_OVERRUN) {
1699 estats->rx_overrun++;
1700 stats->rx_crc_errors++;
1701 }
1702}
1703
7d12e780 1704irqreturn_t gfar_receive(int irq, void *dev_id)
1da177e4 1705{
8c7396ae 1706 gfar_schedule_cleanup((struct net_device *)dev_id);
1da177e4
LT
1707 return IRQ_HANDLED;
1708}
1709
0bbaf069
KG
1710static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1711{
1712 /* If valid headers were found, and valid sums
1713 * were verified, then we tell the kernel that no
1714 * checksumming is necessary. Otherwise, it is */
7f7f5316 1715 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
0bbaf069
KG
1716 skb->ip_summed = CHECKSUM_UNNECESSARY;
1717 else
1718 skb->ip_summed = CHECKSUM_NONE;
1719}


/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* The frame control block is at the start of the frame, if present */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
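
/* Editor's note -- an illustrative view of the buffer layout consumed
 * above (not driver code).  When hardware frame control blocks are in
 * use, each received buffer starts with the FCB, then any configured
 * padding, then the Ethernet frame:
 *
 *	skb->data: [ rxfcb | padding ][ eth header | payload | ... ]
 *	           \___ amount_pull __/
 *
 * gfar_process_frame() reads the FCB first, then skb_pull()s
 * amount_pull bytes so the stack sees the frame starting at the
 * Ethernet header.
 */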

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;
	base = priv->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb_any(skb);
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, priv->rx_ring_size);

		/* update to point at the next skb */
		priv->skb_currx =
			(priv->skb_currx + 1) &
			RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}
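
/* Editor's note -- a compact sketch of the ring walk above (assumption:
 * RX_RING_MOD_MASK reduces to ring_size - 1 for the power-of-two ring
 * sizes the driver uses):
 *
 *	while descriptor not EMPTY and budget remains:
 *		allocate replacement skb
 *		unmap old buffer; pass frame up (or recycle it on error)
 *		re-arm the descriptor with the replacement
 *		bdp = next_bd(...);		// wraps on RXBD_WRAP
 *		skb_currx = (skb_currx + 1) & (ring_size - 1);
 *
 * Note that the descriptor is always re-armed, even on the error path
 * (by reusing the old skb), so the ring never develops a hole.
 */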

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int tx_cleaned = 0;
	int rx_cleaned = 0;
	unsigned long flags;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		tx_cleaned = gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	rx_cleaned = gfar_clean_rx_ring(dev, budget);

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		netif_rx_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic, priv->rxic);
		}
		if (likely(priv->txcoalescing)) {
			gfar_write(&priv->regs->txic, 0);
			gfar_write(&priv->regs->txic, priv->txic);
		}
	}

	return rx_cleaned;
}
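
/* Editor's note -- why gfar_poll() returns the full budget when TX work
 * was done (a sketch of the NAPI contract, not driver code): a return
 * value equal to budget tells the core "not finished, poll me again",
 * while finishing under budget ends polled mode:
 *
 *	if (work_done < budget) {
 *		netif_rx_complete(napi);	// leave polled mode
 *		// ...re-enable device interrupts...
 *	}
 *	return work_done;
 *
 * Claiming the whole budget after TX cleanup keeps the poll loop alive
 * without re-enabling interrupts prematurely.
 */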

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}
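
/* Editor's note -- the register mapping applied above, summarized
 * (illustrative):
 *
 *	1000 Mbit: MACCFG2 interface field = GMII;  ECNTRL.R100 untouched
 *	 100 Mbit: MACCFG2 interface field = MII;   ECNTRL.R100 set
 *	  10 Mbit: MACCFG2 interface field = MII;   ECNTRL.R100 cleared
 *
 * plus MACCFG2_FULL_DUPLEX tracking phydev->duplex.  ECNTRL.R100 only
 * matters in the reduced-pin interface modes, where the MACCFG2 MII
 * setting alone cannot distinguish 10 from 100.
 */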

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags changes) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}
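
/* Editor's note -- the filtering precedence set up above, in brief
 * (illustrative; assumes GFAR_EM_NUM is 15, as in gianfar.h):
 * promiscuous mode bypasses filtering entirely; IFF_ALLMULTI opens
 * every hash bucket; otherwise exact-match slot 0 stays reserved for
 * the station address, slots 1..15 take the first multicast addresses
 * verbatim, and every address beyond that falls back to the (i)gaddr
 * hash, where false positives are possible and must be discarded in
 * software.
 */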

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index determine which gaddr register to use, and the 5 other
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}
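
/* Editor's note -- a worked example of the index split above
 * (illustrative; assumes hash_width == 8, i.e. no extended hash).
 * For result = 0xB6C00000 the top 8 bits are 0xB6 = 0b10110110, so
 *
 *	whichreg = 0b101          = 5		-> hash_regs[5]
 *	whichbit = 0b10110        = 22		-> IBM bit 22
 *	value    = 1 << (31 - 22) = 1 << 9
 *
 * i.e. the entry lives in the sixth hash register, and IBM-numbered
 * bit 22 corresponds to conventional bit position 9 of that register.
 */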

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
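
/* Editor's note -- the byte swap above, illustrated with a made-up
 * address (big-endian host assumed, as on PowerPC).  For
 * addr = 00:04:9f:01:02:03, tmpbuf becomes { 03, 02, 01, 9f, 04, 00 },
 * so the two register writes are
 *
 *	macptr[0] = 0x0302019f	// last four octets, reversed
 *	macptr[1] = 0x0400xxxx	// first two octets; low half don't-care
 *
 * matching the controller's reversed station-address layout.  Note
 * that the second load reads two bytes past the 6-byte tmpbuf, which
 * is why the low 16 bits are undefined.
 */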

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}
2255
72abb461
KS
2256/* work with hotplug and coldplug */
2257MODULE_ALIAS("platform:fsl-gianfar");
2258
b31a1d8b
AF
2259static struct of_device_id gfar_match[] =
2260{
2261 {
2262 .type = "network",
2263 .compatible = "gianfar",
2264 },
2265 {},
2266};
2267
1da177e4 2268/* Structure for a device driver */
b31a1d8b
AF
2269static struct of_platform_driver gfar_driver = {
2270 .name = "fsl-gianfar",
2271 .match_table = gfar_match,
2272
1da177e4
LT
2273 .probe = gfar_probe,
2274 .remove = gfar_remove,
d87eb127
SW
2275 .suspend = gfar_suspend,
2276 .resume = gfar_resume,
1da177e4
LT
2277};
2278
2279static int __init gfar_init(void)
2280{
bb40dcbb
AF
2281 int err = gfar_mdio_init();
2282
2283 if (err)
2284 return err;
2285
b31a1d8b 2286 err = of_register_platform_driver(&gfar_driver);
bb40dcbb
AF
2287
2288 if (err)
2289 gfar_mdio_exit();
6aa20a22 2290
bb40dcbb 2291 return err;
1da177e4
LT
2292}
2293
2294static void __exit gfar_exit(void)
2295{
b31a1d8b 2296 of_unregister_platform_driver(&gfar_driver);
bb40dcbb 2297 gfar_mdio_exit();
1da177e4
LT
2298}
2299
2300module_init(gfar_init);
2301module_exit(gfar_exit);
2302