aaec0fab 1/*
3342cf0e 2 * Network device driver for Cell Processor-Based Blade and Celleb platform
3 *
4 * (C) Copyright IBM Corp. 2005
3342cf0e 5 * (C) Copyright 2006 TOSHIBA CORPORATION
6 *
7 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/compiler.h>
26#include <linux/crc32.h>
27#include <linux/delay.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/firmware.h>
31#include <linux/if_vlan.h>
7c5c220e 32#include <linux/in.h>
33#include <linux/init.h>
34#include <linux/ioport.h>
35#include <linux/ip.h>
36#include <linux/kernel.h>
37#include <linux/mii.h>
38#include <linux/module.h>
39#include <linux/netdevice.h>
40#include <linux/device.h>
41#include <linux/pci.h>
42#include <linux/skbuff.h>
43#include <linux/slab.h>
44#include <linux/tcp.h>
45#include <linux/types.h>
11f1a52b 46#include <linux/vmalloc.h>
47#include <linux/wait.h>
48#include <linux/workqueue.h>
49#include <asm/bitops.h>
50#include <asm/pci-bridge.h>
51#include <net/checksum.h>
52
53#include "spider_net.h"
54
55MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
56 "<Jens.Osterkamp@de.ibm.com>");
57MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
58MODULE_LICENSE("GPL");
90f10841 59MODULE_VERSION(VERSION);
60
61static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
62static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
63
64module_param(rx_descriptors, int, 0444);
65module_param(tx_descriptors, int, 0444);
66
67MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
68 "in rx chains");
69MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
70 "in tx chain");
71
72char spider_net_driver_name[] = "spidernet";
73
74static struct pci_device_id spider_net_pci_tbl[] = {
75 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
76 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
77 { 0, }
78};
79
80MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
81
82/**
83 * spider_net_read_reg - reads an SMMIO register of a card
84 * @card: device structure
85 * @reg: register to read from
86 *
87 * returns the content of the specified SMMIO register.
88 */
bdd01503 89static inline u32
90spider_net_read_reg(struct spider_net_card *card, u32 reg)
91{
92 /* We use the powerpc specific variants instead of readl_be() because
93 * we know spidernet is not a real PCI device and we can thus avoid the
94 * performance hit caused by the PCI workarounds.
95 */
96 return in_be32(card->regs + reg);
97}
98
99/**
100 * spider_net_write_reg - writes to an SMMIO register of a card
101 * @card: device structure
102 * @reg: register to write to
103 * @value: value to write into the specified SMMIO register
104 */
bdd01503 105static inline void
106spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
107{
108 /* We use the powerpc specific variants instead of writel_be() because
109 * we know spidernet is not a real PCI device and we can thus avoid the
110 * performance hit caused by the PCI workarounds.
111 */
112 out_be32(card->regs + reg, value);
113}
114
115/** spider_net_write_phy - write to phy register
116 * @netdev: adapter to be written to
117 * @mii_id: id of MII
118 * @reg: PHY register
119 * @val: value to be written to phy register
120 *
121 * spider_net_write_phy writes to an arbitrary PHY
122 * register via the spider GPCWOPCMD register. We assume the queue does
123 * not run full (not more than 15 commands outstanding).
124 **/
125static void
126spider_net_write_phy(struct net_device *netdev, int mii_id,
127 int reg, int val)
128{
129 struct spider_net_card *card = netdev_priv(netdev);
130 u32 writevalue;
131
132 writevalue = ((u32)mii_id << 21) |
133 ((u32)reg << 16) | ((u32)val);
134
135 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
136}
137
138/** spider_net_read_phy - read from phy register
139 * @netdev: network device to be read from
140 * @mii_id: id of MII
141 * @reg: PHY register
142 *
143 * Returns value read from PHY register
144 *
145 * spider_net_read_phy reads from an arbitrary PHY
146 * register via the spider GPCROPCMD register
147 **/
148static int
149spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
150{
151 struct spider_net_card *card = netdev_priv(netdev);
152 u32 readvalue;
153
154 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
155 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
156
157 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
158 * interrupt, as we poll for the completion of the read operation
159 * in spider_net_read_phy. Should take about 50 us */
160 do {
161 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
162 } while (readvalue & SPIDER_NET_GPREXEC);
163
164 readvalue &= SPIDER_NET_GPRDAT_MASK;
165
166 return readvalue;
167}
168
169/**
170 * spider_net_setup_aneg - initial auto-negotiation setup
171 * @card: device structure
172 **/
173static void
174spider_net_setup_aneg(struct spider_net_card *card)
175{
176 struct mii_phy *phy = &card->phy;
177 u32 advertise = 0;
a1c38a4a 178 u16 bmsr, estat;
abdb66b5 179
180 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
181 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
182
183 if (bmsr & BMSR_10HALF)
184 advertise |= ADVERTISED_10baseT_Half;
185 if (bmsr & BMSR_10FULL)
186 advertise |= ADVERTISED_10baseT_Full;
187 if (bmsr & BMSR_100HALF)
188 advertise |= ADVERTISED_100baseT_Half;
189 if (bmsr & BMSR_100FULL)
190 advertise |= ADVERTISED_100baseT_Full;
191
192 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
193 advertise |= SUPPORTED_1000baseT_Full;
194 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
195 advertise |= SUPPORTED_1000baseT_Half;
196
197 mii_phy_probe(phy, phy->mii_id);
198 phy->def->ops->setup_aneg(phy, advertise);
199
200}
201
aaec0fab 202/**
11f1a52b 203 * spider_net_rx_irq_off - switch off rx irq on this spider card
204 * @card: device structure
205 *
11f1a52b 206 * switches off rx irq by masking them out in the GHIINTnMSK register
207 */
208static void
11f1a52b 209spider_net_rx_irq_off(struct spider_net_card *card)
210{
211 u32 regvalue;
aaec0fab 212
213 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
214 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
215}
216
217/**
11f1a52b 218 * spider_net_rx_irq_on - switch on rx irq on this spider card
219 * @card: device structure
220 *
11f1a52b 221 * switches on rx irq by enabling them in the GHIINTnMSK register
222 */
223static void
11f1a52b 224spider_net_rx_irq_on(struct spider_net_card *card)
225{
226 u32 regvalue;
aaec0fab 227
228 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
229 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
230}
231
232/**
233 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
234 * @card: card structure
235 *
236 * spider_net_set_promisc sets the unicast destination address filter and
237 * thus either allows for non-promisc mode or promisc mode
238 */
239static void
240spider_net_set_promisc(struct spider_net_card *card)
241{
242 u32 macu, macl;
243 struct net_device *netdev = card->netdev;
244
245 if (netdev->flags & IFF_PROMISC) {
246 /* clear destination entry 0 */
247 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
248 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
249 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
250 SPIDER_NET_PROMISC_VALUE);
251 } else {
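		/* Non-promiscuous mode: load unicast destination entry 0
		 * with our own address. dev_addr[0..1] go into the upper
		 * filter word (or'ed with SPIDER_NET_UA_DESCR_VALUE below),
		 * dev_addr[2..5] into the lower word. */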
252 macu = netdev->dev_addr[0];
253 macu <<= 8;
254 macu |= netdev->dev_addr[1];
255 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
256
257 macu |= SPIDER_NET_UA_DESCR_VALUE;
258 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
259 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
260 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
261 SPIDER_NET_NONPROMISC_VALUE);
262 }
263}
264
265/**
266 * spider_net_get_mac_address - read mac address from spider card
267 * @netdev: interface device structure
268 *
269 * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
270 */
271static int
272spider_net_get_mac_address(struct net_device *netdev)
273{
274 struct spider_net_card *card = netdev_priv(netdev);
275 u32 macl, macu;
276
277 macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
278 macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
279
280 netdev->dev_addr[0] = (macu >> 24) & 0xff;
281 netdev->dev_addr[1] = (macu >> 16) & 0xff;
282 netdev->dev_addr[2] = (macu >> 8) & 0xff;
283 netdev->dev_addr[3] = macu & 0xff;
284 netdev->dev_addr[4] = (macl >> 8) & 0xff;
285 netdev->dev_addr[5] = macl & 0xff;
286
287 if (!is_valid_ether_addr(&netdev->dev_addr[0]))
288 return -EINVAL;
289
290 return 0;
291}
292
293/**
294 * spider_net_get_descr_status -- returns the status of a descriptor
295 * @descr: descriptor to look at
296 *
297 * returns the status as in the dmac_cmd_status field of the descriptor
298 */
bdd01503 299static inline int
4cb6f9e5 300spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
aaec0fab 301{
4cb6f9e5 302 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
303}
304
305/**
306 * spider_net_free_chain - free descriptor chain
307 * @card: card structure
308 * @chain: address of chain
309 *
310 */
311static void
312spider_net_free_chain(struct spider_net_card *card,
313 struct spider_net_descr_chain *chain)
314{
315 struct spider_net_descr *descr;
316
317 descr = chain->ring;
318 do {
aaec0fab 319 descr->bus_addr = 0;
4cb6f9e5 320 descr->hwdescr->next_descr_addr = 0;
321 descr = descr->next;
322 } while (descr != chain->ring);
323
324 dma_free_coherent(&card->pdev->dev, chain->num_desc,
4cb6f9e5 325 chain->hwring, chain->dma_addr);
326}
327
328/**
d4ed8f8d 329 * spider_net_init_chain - alloc and link descriptor chain
330 * @card: card structure
331 * @chain: address of chain
aaec0fab 332 *
d4ed8f8d 333 * We manage a circular list that mirrors the hardware structure,
334 * except that the hardware uses bus addresses.
335 *
d4ed8f8d 336 * Returns 0 on success, <0 on failure
337 */
338static int
339spider_net_init_chain(struct spider_net_card *card,
d4ed8f8d 340 struct spider_net_descr_chain *chain)
341{
342 int i;
343 struct spider_net_descr *descr;
4cb6f9e5 344 struct spider_net_hw_descr *hwdescr;
11f1a52b 345 dma_addr_t buf;
d4ed8f8d 346 size_t alloc_size;
aaec0fab 347
4cb6f9e5 348 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
aaec0fab 349
4cb6f9e5 350 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
d4ed8f8d 351 &chain->dma_addr, GFP_KERNEL);
aaec0fab 352
4cb6f9e5 353 if (!chain->hwring)
d4ed8f8d 354 return -ENOMEM;
aaec0fab 355
4cb6f9e5 356 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
357
358 /* Set up the hardware pointers in each descriptor */
359 descr = chain->ring;
360 hwdescr = chain->hwring;
d4ed8f8d 361 buf = chain->dma_addr;
362 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
363 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
364 hwdescr->next_descr_addr = 0;
aaec0fab 365
4cb6f9e5 366 descr->hwdescr = hwdescr;
11f1a52b 367 descr->bus_addr = buf;
368 descr->next = descr + 1;
369 descr->prev = descr - 1;
370
4cb6f9e5 371 buf += sizeof(struct spider_net_hw_descr);
372 }
373 /* do actual circular list */
374 (descr-1)->next = chain->ring;
375 chain->ring->prev = descr-1;
aaec0fab 376
bdd01503 377 spin_lock_init(&chain->lock);
378 chain->head = chain->ring;
379 chain->tail = chain->ring;
aaec0fab 380 return 0;
381}
382
383/**
384 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
385 * @card: card structure
386 *
387 * Frees the skbs attached to the rx descriptors and unmaps their buffers.
388 */
389static void
390spider_net_free_rx_chain_contents(struct spider_net_card *card)
391{
392 struct spider_net_descr *descr;
393
394 descr = card->rx_chain.head;
64751910 395 do {
aaec0fab 396 if (descr->skb) {
4cb6f9e5 397 pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
11f1a52b 398 SPIDER_NET_MAX_FRAME,
348bc2a6 399 PCI_DMA_BIDIRECTIONAL);
400 dev_kfree_skb(descr->skb);
401 descr->skb = NULL;
402 }
403 descr = descr->next;
64751910 404 } while (descr != card->rx_chain.head);
405}
406
407/**
a4182c50 408 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
409 * @card: card structure
410 * @descr: descriptor to re-init
411 *
a4182c50 412 * Return 0 on success, <0 on failure.
aaec0fab 413 *
414 * Allocates a new rx skb, iommu-maps it and attaches it to the
415 * descriptor. Mark the descriptor as activated, ready-to-use.
416 */
417static int
418spider_net_prepare_rx_descr(struct spider_net_card *card,
419 struct spider_net_descr *descr)
420{
4cb6f9e5 421 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
8e0a613b 422 dma_addr_t buf;
423 int offset;
424 int bufsize;
425
426 /* we need to round up the buffer size to a multiple of 128 */
11f1a52b 427 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
428 (~(SPIDER_NET_RXBUF_ALIGN - 1));
429
430 /* and we need to have it 128 byte aligned, therefore we allocate a
431 * bit more */
432 /* allocate an skb */
433 descr->skb = netdev_alloc_skb(card->netdev,
434 bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
aaec0fab 435 if (!descr->skb) {
436 if (netif_msg_rx_err(card) && net_ratelimit())
437 pr_err("Not enough memory to allocate rx buffer\n");
9b6b0b81 438 card->spider_stats.alloc_rx_skb_error++;
439 return -ENOMEM;
440 }
441 hwdescr->buf_size = bufsize;
442 hwdescr->result_size = 0;
443 hwdescr->valid_size = 0;
444 hwdescr->data_status = 0;
445 hwdescr->data_error = 0;
446
447 offset = ((unsigned long)descr->skb->data) &
448 (SPIDER_NET_RXBUF_ALIGN - 1);
449 if (offset)
450 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
a4182c50 451 /* iommu-map the skb */
8e0a613b 452 buf = pci_map_single(card->pdev, descr->skb->data,
bdd01503 453 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
d4b0a4c1 454 if (pci_dma_mapping_error(buf)) {
aaec0fab 455 dev_kfree_skb_any(descr->skb);
d9c199ee 456 descr->skb = NULL;
11f1a52b 457 if (netif_msg_rx_err(card) && net_ratelimit())
aaec0fab 458 pr_err("Could not iommu-map rx buffer\n");
9b6b0b81 459 card->spider_stats.rx_iommu_map_error++;
4cb6f9e5 460 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
aaec0fab 461 } else {
4cb6f9e5 462 hwdescr->buf_addr = buf;
90476a20 463 wmb();
4cb6f9e5 464 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
bdd01503 465 SPIDER_NET_DMAC_NOINTR_COMPLETE;
466 }
467
a4182c50 468 return 0;
469}
470
471/**
11f1a52b 472 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
473 * @card: card structure
474 *
11f1a52b 475 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
476 * chip by writing to the appropriate register. DMA is enabled in
477 * spider_net_enable_rxdmac.
478 */
bdd01503 479static inline void
480spider_net_enable_rxchtails(struct spider_net_card *card)
481{
482 /* assume chain is aligned correctly */
483 spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
484 card->rx_chain.tail->bus_addr);
485}
486
487/**
488 * spider_net_enable_rxdmac - enables a receive DMA controller
489 * @card: card structure
490 *
491 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
492 * in the GDADMACCNTR register
493 */
bdd01503 494static inline void
495spider_net_enable_rxdmac(struct spider_net_card *card)
496{
11f1a52b 497 wmb();
498 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
499 SPIDER_NET_DMA_RX_VALUE);
500}
501
502/**
503 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
504 * @card: card structure
505 *
11f1a52b 506 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
507 */
508static void
509spider_net_refill_rx_chain(struct spider_net_card *card)
510{
511 struct spider_net_descr_chain *chain = &card->rx_chain;
512 unsigned long flags;
aaec0fab 513
514 /* one context doing the refill (and a second context seeing that
515 * and omitting it) is ok. If called by NAPI, we'll be called again
516 * as spider_net_decode_one_descr is called several times. If some
517 * interrupt calls us, the NAPI is about to clean up anyway. */
518 if (!spin_trylock_irqsave(&chain->lock, flags))
519 return;
520
4cb6f9e5 521 while (spider_net_get_descr_status(chain->head->hwdescr) ==
522 SPIDER_NET_DESCR_NOT_IN_USE) {
523 if (spider_net_prepare_rx_descr(card, chain->head))
524 break;
525 chain->head = chain->head->next;
526 }
aaec0fab 527
bdd01503 528 spin_unlock_irqrestore(&chain->lock, flags);
529}
530
531/**
2c307db7 532 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
533 * @card: card structure
534 *
2c307db7 535 * Returns 0 on success, <0 on failure.
536 */
537static int
538spider_net_alloc_rx_skbs(struct spider_net_card *card)
539{
540 struct spider_net_descr_chain *chain = &card->rx_chain;
541 struct spider_net_descr *start = chain->tail;
542 struct spider_net_descr *descr = start;
aaec0fab 543
544 /* Link up the hardware chain pointers */
545 do {
546 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
547 descr = descr->next;
548 } while (descr != start);
aaec0fab 549
550 /* Put at least one buffer into the chain. If this fails,
551 * we've got a problem. If not, spider_net_refill_rx_chain
552 * will do the rest at the end of this function. */
553 if (spider_net_prepare_rx_descr(card, chain->head))
554 goto error;
555 else
556 chain->head = chain->head->next;
557
558 /* This will allocate the rest of the rx buffers;
559 * if not, it's business as usual later on. */
aaec0fab 560 spider_net_refill_rx_chain(card);
11f1a52b 561 spider_net_enable_rxdmac(card);
562 return 0;
563
564error:
565 spider_net_free_rx_chain_contents(card);
2bf27a0d 566 return -ENOMEM;
567}
568
569/**
570 * spider_net_get_multicast_hash - generates hash for multicast filter table
571 * @addr: multicast address
572 *
573 * returns the hash value.
574 *
575 * spider_net_get_multicast_hash calculates a hash value for a given multicast
576 * address, which is used to set the multicast filter tables
577 */
578static u8
579spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
580{
581 u32 crc;
582 u8 hash;
583 char addr_for_crc[ETH_ALEN] = { 0, };
584 int i, bit;
585
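	/* Build a copy of the address with the byte order and the bit order
	 * within each byte reversed, feed it to a big-endian CRC32, and pick
	 * bits of that CRC below to form the 8-bit hash. */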
586 for (i = 0; i < ETH_ALEN * 8; i++) {
587 bit = (addr[i / 8] >> (i % 8)) & 1;
588 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
589 }
aaec0fab 590
11f1a52b 591 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
592
593 hash = (crc >> 27);
594 hash <<= 3;
595 hash |= crc & 7;
11f1a52b 596 hash &= 0xff;
597
598 return hash;
599}
600
601/**
602 * spider_net_set_multi - sets multicast addresses and promisc flags
603 * @netdev: interface device structure
604 *
605 * spider_net_set_multi configures multicast addresses as needed for the
606 * netdev interface. It also sets up multicast, allmulti and promisc
607 * flags appropriately
608 */
609static void
610spider_net_set_multi(struct net_device *netdev)
611{
612 struct dev_mc_list *mc;
613 u8 hash;
614 int i;
615 u32 reg;
616 struct spider_net_card *card = netdev_priv(netdev);
617 unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
618 {0, };
619
620 spider_net_set_promisc(card);
621
622 if (netdev->flags & IFF_ALLMULTI) {
623 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
624 set_bit(i, bitmask);
625 }
626 goto write_hash;
627 }
628
629 /* well, we know, what the broadcast hash value is: it's xfd
630 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
631 set_bit(0xfd, bitmask);
632
633 for (mc = netdev->mc_list; mc; mc = mc->next) {
634 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
635 set_bit(hash, bitmask);
636 }
637
638write_hash:
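	/* Each 32-bit GMRMHFILnR register holds four hash-table entries, one
	 * per byte; writing 0x08 into a byte enables that bucket. */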
639 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
640 reg = 0;
641 if (test_bit(i * 4, bitmask))
642 reg += 0x08;
643 reg <<= 8;
644 if (test_bit(i * 4 + 1, bitmask))
645 reg += 0x08;
646 reg <<= 8;
647 if (test_bit(i * 4 + 2, bitmask))
648 reg += 0x08;
649 reg <<= 8;
650 if (test_bit(i * 4 + 3, bitmask))
651 reg += 0x08;
652
653 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
654 }
655}
656
657/**
658 * spider_net_disable_rxdmac - disables the receive DMA controller
659 * @card: card structure
660 *
661 * spider_net_disable_rxdmac terminates processing on the DMA controller by
662 * turning off DMA and issuing a force end
663 */
664static void
665spider_net_disable_rxdmac(struct spider_net_card *card)
666{
667 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
668 SPIDER_NET_DMA_RX_FEND_VALUE);
669}
670
671/**
672 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
673 * @card: card structure
674 * @descr: descriptor structure to fill out
675 * @skb: packet to use
676 *
677 * returns 0 on success, <0 on failure.
678 *
679 * fills out the descriptor structure with skb data and len. Copies data,
680 * if needed (32bit DMA!)
681 */
682static int
683spider_net_prepare_tx_descr(struct spider_net_card *card,
684 struct sk_buff *skb)
685{
d9c199ee 686 struct spider_net_descr_chain *chain = &card->tx_chain;
9cc7bf7e 687 struct spider_net_descr *descr;
4cb6f9e5 688 struct spider_net_hw_descr *hwdescr;
11f1a52b 689 dma_addr_t buf;
9cc7bf7e 690 unsigned long flags;
11f1a52b 691
9c434f5e 692 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
d4b0a4c1 693 if (pci_dma_mapping_error(buf)) {
11f1a52b 694 if (netif_msg_tx_err(card) && net_ratelimit())
aaec0fab 695 pr_err("could not iommu-map packet (%p, %i). "
9c434f5e 696 "Dropping packet\n", skb->data, skb->len);
9b6b0b81 697 card->spider_stats.tx_iommu_map_error++;
698 return -ENOMEM;
699 }
700
d9c199ee 701 spin_lock_irqsave(&chain->lock, flags);
9cc7bf7e 702 descr = card->tx_chain.head;
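	/* Treat the ring as full while only two descriptors separate head
	 * from tail, so that head can never run into tail. */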
703 if (descr->next == chain->tail->prev) {
704 spin_unlock_irqrestore(&chain->lock, flags);
705 pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
706 return -ENOMEM;
707 }
4cb6f9e5 708 hwdescr = descr->hwdescr;
d9c199ee 709 chain->head = descr->next;
9cc7bf7e 710
aaec0fab 711 descr->skb = skb;
712 hwdescr->buf_addr = buf;
713 hwdescr->buf_size = skb->len;
714 hwdescr->next_descr_addr = 0;
715 hwdescr->data_status = 0;
aaec0fab 716
4cb6f9e5 717 hwdescr->dmac_cmd_status =
bdd01503 718 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
d9c199ee 719 spin_unlock_irqrestore(&chain->lock, flags);
9cc7bf7e 720
3a2c892d 721 if (skb->ip_summed == CHECKSUM_PARTIAL)
eddc9ec5 722 switch (ip_hdr(skb)->protocol) {
bdd01503 723 case IPPROTO_TCP:
4cb6f9e5 724 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
725 break;
726 case IPPROTO_UDP:
4cb6f9e5 727 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
728 break;
729 }
730
204e5fa1 731 /* Chain the bus address, so that the DMA engine finds this descr. */
732 wmb();
733 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
bdd01503 734
917a5b8e 735 card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
736 return 0;
737}
738
a664ccf4 739static int
740spider_net_set_low_watermark(struct spider_net_card *card)
741{
742 struct spider_net_descr *descr = card->tx_chain.tail;
743 struct spider_net_hw_descr *hwdescr;
9cc7bf7e 744 unsigned long flags;
745 int status;
746 int cnt=0;
747 int i;
204e5fa1 748
749 /* Measure the length of the queue. Measurement does not
750 * need to be precise -- does not need a lock. */
204e5fa1 751 while (descr != card->tx_chain.head) {
4cb6f9e5 752 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
753 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
754 break;
755 descr = descr->next;
756 cnt++;
757 }
758
759 /* If TX queue is short, don't even bother with interrupts */
d4ed8f8d 760 if (cnt < card->tx_chain.num_desc/4)
a664ccf4 761 return cnt;
762
763 /* Set low-watermark 3/4th's of the way into the queue. */
764 descr = card->tx_chain.tail;
765 cnt = (cnt*3)/4;
766 for (i=0;i<cnt; i++)
767 descr = descr->next;
768
769 /* Set the new watermark, clear the old watermark */
9cc7bf7e 770 spin_lock_irqsave(&card->tx_chain.lock, flags);
771 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
772 if (card->low_watermark && card->low_watermark != descr) {
773 hwdescr = card->low_watermark->hwdescr;
774 hwdescr->dmac_cmd_status =
775 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
776 }
204e5fa1 777 card->low_watermark = descr;
9cc7bf7e 778 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
a664ccf4 779 return cnt;
780}
781
782/**
783 * spider_net_release_tx_chain - processes sent tx descriptors
784 * @card: adapter structure
785 * @brutal: if set, don't care about whether descriptor seems to be in use
786 *
787 * returns 0 if the tx ring is empty, otherwise 1.
788 *
789 * spider_net_release_tx_chain releases the tx descriptors that spider has
790 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
791 * If some other context is calling this function, we return 1 so that we're
792 * scheduled again (if we were scheduled) and will not lose the initiative.
793 */
794static int
795spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
796{
797 struct spider_net_descr_chain *chain = &card->tx_chain;
9cc7bf7e 798 struct spider_net_descr *descr;
4cb6f9e5 799 struct spider_net_hw_descr *hwdescr;
800 struct sk_buff *skb;
801 u32 buf_addr;
802 unsigned long flags;
803 int status;
804
5c8e98fe 805 while (1) {
9cc7bf7e 806 spin_lock_irqsave(&chain->lock, flags);
807 if (chain->tail == chain->head) {
808 spin_unlock_irqrestore(&chain->lock, flags);
809 return 0;
810 }
9cc7bf7e 811 descr = chain->tail;
4cb6f9e5 812 hwdescr = descr->hwdescr;
9cc7bf7e 813
4cb6f9e5 814 status = spider_net_get_descr_status(hwdescr);
815 switch (status) {
816 case SPIDER_NET_DESCR_COMPLETE:
817 card->netdev_stats.tx_packets++;
9cc7bf7e 818 card->netdev_stats.tx_bytes += descr->skb->len;
819 break;
820
821 case SPIDER_NET_DESCR_CARDOWNED:
822 if (!brutal) {
823 spin_unlock_irqrestore(&chain->lock, flags);
bdd01503 824 return 1;
825 }
826
827 /* fallthrough, if we release the descriptors
828 * brutally (then we don't care about
829 * SPIDER_NET_DESCR_CARDOWNED) */
830
831 case SPIDER_NET_DESCR_RESPONSE_ERROR:
832 case SPIDER_NET_DESCR_PROTECTION_ERROR:
833 case SPIDER_NET_DESCR_FORCE_END:
834 if (netif_msg_tx_err(card))
835 pr_err("%s: forcing end of tx descriptor "
836 "with status x%02x\n",
837 card->netdev->name, status);
838 card->netdev_stats.tx_errors++;
839 break;
840
841 default:
842 card->netdev_stats.tx_dropped++;
843 if (!brutal) {
844 spin_unlock_irqrestore(&chain->lock, flags);
c3fee4c5 845 return 1;
9cc7bf7e 846 }
bdd01503 847 }
aaec0fab 848
9cc7bf7e 849 chain->tail = descr->next;
4cb6f9e5 850 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
9cc7bf7e 851 skb = descr->skb;
d9c199ee 852 descr->skb = NULL;
4cb6f9e5 853 buf_addr = hwdescr->buf_addr;
854 spin_unlock_irqrestore(&chain->lock, flags);
855
856 /* unmap the skb */
857 if (skb) {
858 pci_unmap_single(card->pdev, buf_addr, skb->len,
859 PCI_DMA_TODEVICE);
860 dev_kfree_skb(skb);
861 }
862 }
863 return 0;
864}
865
866/**
867 * spider_net_kick_tx_dma - enables TX DMA processing
868 * @card: card structure
870 *
871 * This routine will start the transmit DMA running if
872 * it is not already running. This routine need only be
873 * called when queueing a new packet to an empty tx queue.
874 * It looks for the first card-owned descriptor (starting at the
875 * chain tail), writes its bus address as the start of the tx
876 * descriptor chain and enables the transmission DMA engine.
aaec0fab 877 */
878static inline void
879spider_net_kick_tx_dma(struct spider_net_card *card)
aaec0fab 880{
bdd01503 881 struct spider_net_descr *descr;
aaec0fab 882
883 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
884 SPIDER_NET_TX_DMA_EN)
885 goto out;
aaec0fab 886
887 descr = card->tx_chain.tail;
888 for (;;) {
4cb6f9e5 889 if (spider_net_get_descr_status(descr->hwdescr) ==
890 SPIDER_NET_DESCR_CARDOWNED) {
891 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
892 descr->bus_addr);
893 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
894 SPIDER_NET_DMA_TX_VALUE);
895 break;
896 }
897 if (descr == card->tx_chain.head)
898 break;
899 descr = descr->next;
900 }
901
902out:
903 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
904}
905
906/**
907 * spider_net_xmit - transmits a frame over the device
908 * @skb: packet to send out
909 * @netdev: interface device structure
910 *
bdd01503 911 * returns 0 on success, !0 on failure
aaec0fab
JO
912 */
913static int
914spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
915{
a664ccf4 916 int cnt;
aaec0fab 917 struct spider_net_card *card = netdev_priv(netdev);
bdd01503 918
11f1a52b 919 spider_net_release_tx_chain(card, 0);
aaec0fab 920
d9c199ee 921 if (spider_net_prepare_tx_descr(card, skb) != 0) {
9b6b0b81 922 card->netdev_stats.tx_dropped++;
923 netif_stop_queue(netdev);
924 return NETDEV_TX_BUSY;
bdd01503 925 }
aaec0fab 926
927 cnt = spider_net_set_low_watermark(card);
928 if (cnt < 5)
929 spider_net_kick_tx_dma(card);
313ef4b7 930 return NETDEV_TX_OK;
bdd01503 931}
11f1a52b 932
933/**
934 * spider_net_cleanup_tx_ring - cleans up the TX ring
935 * @card: card structure
936 *
937 * spider_net_cleanup_tx_ring is called by either the tx_timer
938 * or from the NAPI polling routine.
939 * This routine releases resources associated with transmitted
940 * packets, including updating the queue tail pointer.
941 */
942static void
943spider_net_cleanup_tx_ring(struct spider_net_card *card)
944{
bdd01503 945 if ((spider_net_release_tx_chain(card, 0) != 0) &&
313ef4b7 946 (card->netdev->flags & IFF_UP)) {
bdd01503 947 spider_net_kick_tx_dma(card);
948 netif_wake_queue(card->netdev);
949 }
950}
951
952/**
953 * spider_net_do_ioctl - called for device ioctls
954 * @netdev: interface device structure
955 * @ifr: request parameter structure for ioctl
956 * @cmd: command code for ioctl
957 *
958 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
959 * -EOPNOTSUPP is returned if an unknown ioctl was requested
960 */
961static int
962spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
963{
964 switch (cmd) {
965 default:
966 return -EOPNOTSUPP;
967 }
968}
969
970/**
971 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
972 * @descr: descriptor to process
973 * @card: card structure
974 *
975 * Fills out skb structure and passes the data to the stack.
976 * The descriptor state is not changed.
aaec0fab 977 */
7f7223b8 978static void
aaec0fab 979spider_net_pass_skb_up(struct spider_net_descr *descr,
1cd173f6 980 struct spider_net_card *card)
aaec0fab 981{
4cb6f9e5 982 struct spider_net_hw_descr *hwdescr= descr->hwdescr;
983 struct sk_buff *skb;
984 struct net_device *netdev;
985 u32 data_status, data_error;
986
987 data_status = hwdescr->data_status;
988 data_error = hwdescr->data_error;
989 netdev = card->netdev;
990
11f1a52b 991 skb = descr->skb;
4cb6f9e5 992 skb_put(skb, hwdescr->valid_size);
993
994 /* the card seems to add 2 bytes of junk in front
995 * of the ethernet frame */
996#define SPIDER_MISALIGN 2
997 skb_pull(skb, SPIDER_MISALIGN);
998 skb->protocol = eth_type_trans(skb, netdev);
999
1000 /* checksum offload */
1001 if (card->options.rx_csum) {
1002 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
1003 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1004 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1005 skb->ip_summed = CHECKSUM_UNNECESSARY;
1006 else
1007 skb->ip_summed = CHECKSUM_NONE;
11f1a52b 1008 } else
aaec0fab 1009 skb->ip_summed = CHECKSUM_NONE;
1010
1011 if (data_status & SPIDER_NET_VLAN_PACKET) {
1012 /* further enhancements: HW-accel VLAN
1013 * vlan_hwaccel_receive_skb
1014 */
1015 }
1016
1017 /* update netdevice statistics */
1018 card->netdev_stats.rx_packets++;
1019 card->netdev_stats.rx_bytes += skb->len;
1020
1021 /* pass skb up to stack */
1022 netif_receive_skb(skb);
1023}
1024
1025#ifdef DEBUG
1026static void show_rx_chain(struct spider_net_card *card)
1027{
1028 struct spider_net_descr_chain *chain = &card->rx_chain;
1029 struct spider_net_descr *start= chain->tail;
1030 struct spider_net_descr *descr= start;
1031 int status;
1032
1033 int cnt = 0;
1034 int cstat = spider_net_get_descr_status(descr);
1035 printk(KERN_INFO "RX chain tail at descr=%ld\n",
1036 (start - card->descr) - card->tx_chain.num_desc);
1037 status = cstat;
1038 do
1039 {
1040 status = spider_net_get_descr_status(descr);
1041 if (cstat != status) {
1042 printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
1043 cstat = status;
1044 cnt = 0;
1045 }
1046 cnt ++;
1047 descr = descr->next;
1048 } while (descr != start);
1049 printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
1050}
1051#endif
1052
1053/**
1054 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
1055 *
1056 * If the driver fails to keep up and empty the queue, then the
1057 * hardware will run out of room to put incoming packets. This
1058 * will cause the hardware to skip descrs that are full (instead
1059 * of halting/retrying). Thus, once the driver runs, it will need
1060 * to "catch up" to where the hardware chain pointer is at.
1061 */
1062static void spider_net_resync_head_ptr(struct spider_net_card *card)
1063{
1064 unsigned long flags;
1065 struct spider_net_descr_chain *chain = &card->rx_chain;
1066 struct spider_net_descr *descr;
1067 int i, status;
1068
1069 /* Advance head pointer past any empty descrs */
1070 descr = chain->head;
1071 status = spider_net_get_descr_status(descr->hwdescr);
1072
1073 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
1074 return;
1075
1076 spin_lock_irqsave(&chain->lock, flags);
1077
1078 descr = chain->head;
1079 status = spider_net_get_descr_status(descr->hwdescr);
1080 for (i=0; i<chain->num_desc; i++) {
1081 if (status != SPIDER_NET_DESCR_CARDOWNED) break;
1082 descr = descr->next;
1083 status = spider_net_get_descr_status(descr->hwdescr);
1084 }
1085 chain->head = descr;
1086
1087 spin_unlock_irqrestore(&chain->lock, flags);
1088}
1089
1090static int spider_net_resync_tail_ptr(struct spider_net_card *card)
1091{
1092 struct spider_net_descr_chain *chain = &card->rx_chain;
1093 struct spider_net_descr *descr;
1094 int i, status;
1095
1096 /* Advance tail pointer past any empty and reaped descrs */
1097 descr = chain->tail;
1098 status = spider_net_get_descr_status(descr->hwdescr);
1099
1100 for (i=0; i<chain->num_desc; i++) {
1101 if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
1102 (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
1103 descr = descr->next;
1104 status = spider_net_get_descr_status(descr->hwdescr);
1105 }
1106 chain->tail = descr;
1107
1108 if ((i == chain->num_desc) || (i == 0))
1109 return 1;
1110 return 0;
1111}
1112
aaec0fab 1113/**
7376e732 1114 * spider_net_decode_one_descr - processes an RX descriptor
1115 * @card: card structure
1116 *
7376e732 1117 * Returns 1 if a packet has been sent to the stack, otherwise 0.
aaec0fab 1118 *
1119 * Processes an RX descriptor by iommu-unmapping the data buffer
1120 * and passing the packet up to the stack. This function is called
1121 * in softirq context, e.g. either bottom half from interrupt or
1122 * NAPI polling context.
1123 */
1124static int
1cd173f6 1125spider_net_decode_one_descr(struct spider_net_card *card)
aaec0fab 1126{
1127 struct spider_net_descr_chain *chain = &card->rx_chain;
1128 struct spider_net_descr *descr = chain->tail;
4cb6f9e5 1129 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
bdd01503 1130 int status;
aaec0fab 1131
4cb6f9e5 1132 status = spider_net_get_descr_status(hwdescr);
aaec0fab 1133
1134 /* Nothing in the descriptor, or ring must be empty */
1135 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1136 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1cd173f6 1137 return 0;
aaec0fab 1138
11f1a52b 1139 /* descriptor definitively used -- move on tail */
1140 chain->tail = descr->next;
1141
05b346b5 1142 /* unmap descriptor */
4cb6f9e5 1143 pci_unmap_single(card->pdev, hwdescr->buf_addr,
1144 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1145
1146 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1147 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1148 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1149 if (netif_msg_rx_err(card))
1150 pr_err("%s: dropping RX descriptor with state %d\n",
1151 card->netdev->name, status);
1152 card->netdev_stats.rx_dropped++;
7f7223b8 1153 goto bad_desc;
1154 }
1155
1156 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1157 (status != SPIDER_NET_DESCR_FRAME_END) ) {
5a028877 1158 if (netif_msg_rx_err(card))
7376e732 1159 pr_err("%s: RX descriptor with unknown state %d\n",
aaec0fab 1160 card->netdev->name, status);
5a028877 1161 card->spider_stats.rx_desc_unk_state++;
7f7223b8 1162 goto bad_desc;
1163 }
1164
366684bd 1165 /* The cases we'll throw away the packet immediately */
4cb6f9e5 1166 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1167 if (netif_msg_rx_err(card))
1168 pr_err("%s: error in received descriptor found, "
1169 "data_status=x%08x, data_error=x%08x\n",
1170 card->netdev->name,
4cb6f9e5 1171 hwdescr->data_status, hwdescr->data_error);
7f7223b8 1172 goto bad_desc;
1173 }
1174
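	/* Unexpected bits in dmac_cmd_status (mask 0xfcf4): dump the
	 * descriptor and drop the packet. */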
c3d1182a 1175 if (hwdescr->dmac_cmd_status & 0xfcf4) {
1176 pr_err("%s: bad status, cmd_status=x%08x\n",
1177 card->netdev->name,
1178 hwdescr->dmac_cmd_status);
1179 pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
1180 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1181 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1182 pr_err("result_size=x%08x\n", hwdescr->result_size);
1183 pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1184 pr_err("data_status=x%08x\n", hwdescr->data_status);
1185 pr_err("data_error=x%08x\n", hwdescr->data_error);
1186 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1187
1188 card->spider_stats.rx_desc_error++;
1189 goto bad_desc;
1190 }
1191
1192 /* Ok, we've got a packet in descr */
1193 spider_net_pass_skb_up(descr, card);
83d35145 1194 descr->skb = NULL;
4cb6f9e5 1195 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1196 return 1;
1197
1198bad_desc:
1199 dev_kfree_skb_irq(descr->skb);
d9c199ee 1200 descr->skb = NULL;
4cb6f9e5 1201 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
7f7223b8 1202 return 0;
1203}
1204
1205/**
1206 * spider_net_poll - NAPI poll function called by the stack to return packets
1207 * @netdev: interface device structure
1208 * @budget: number of packets we can pass to the stack at most
1209 *
1210 * returns 0 if no more packets available to the driver/stack. Returns 1,
1211 * if the quota is exceeded, but the driver has still packets.
1212 *
1213 * spider_net_poll returns all packets from the rx descriptors to the stack
1214 * (using netif_receive_skb). If all/enough packets are up, the driver
1215 * reenables interrupts and returns 0. If not, 1 is returned.
1216 */
1217static int
1218spider_net_poll(struct net_device *netdev, int *budget)
1219{
1220 struct spider_net_card *card = netdev_priv(netdev);
1221 int packets_to_do, packets_done = 0;
1222 int no_more_packets = 0;
1223
68a8c609 1224 spider_net_cleanup_tx_ring(card);
1225 packets_to_do = min(*budget, netdev->quota);
1226
1227 while (packets_to_do) {
1cd173f6 1228 if (spider_net_decode_one_descr(card)) {
1229 packets_done++;
1230 packets_to_do--;
1231 } else {
1232 /* no more packets for the stack */
1233 no_more_packets = 1;
1234 break;
1235 }
1236 }
1237
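	/* If RX interrupts fired but nothing was processed, the driver has
	 * probably fallen behind the hardware; resynchronize the rx chain
	 * head and tail pointers. */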
1238 if ((packets_done == 0) && (card->num_rx_ints != 0)) {
1239 no_more_packets = spider_net_resync_tail_ptr(card);
1240 spider_net_resync_head_ptr(card);
1241 }
1242 card->num_rx_ints = 0;
1243
1244 netdev->quota -= packets_done;
1245 *budget -= packets_done;
11f1a52b 1246 spider_net_refill_rx_chain(card);
80dab7c7 1247 spider_net_enable_rxdmac(card);
1248
1249 /* if all packets are in the stack, enable interrupts and return 0 */
1250 /* if not, return 1 */
1251 if (no_more_packets) {
1252 netif_rx_complete(netdev);
1253 spider_net_rx_irq_on(card);
c3d1182a 1254 card->ignore_rx_ramfull = 0;
1255 return 0;
1256 }
1257
1258 return 1;
1259}
1260
1261/**
1262 * spider_net_get_stats - get interface statistics
1263 * @netdev: interface device structure
1264 *
1265 * returns the interface statistics residing in the spider_net_card struct
1266 */
1267static struct net_device_stats *
1268spider_net_get_stats(struct net_device *netdev)
1269{
1270 struct spider_net_card *card = netdev_priv(netdev);
1271 struct net_device_stats *stats = &card->netdev_stats;
1272 return stats;
1273}
1274
1275/**
1276 * spider_net_change_mtu - changes the MTU of an interface
1277 * @netdev: interface device structure
1278 * @new_mtu: new MTU value
1279 *
1280 * returns 0 on success, <0 on failure
1281 */
1282static int
1283spider_net_change_mtu(struct net_device *netdev, int new_mtu)
1284{
1285 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
1286 * and mtu is outbound only anyway */
1287 if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
1288 (new_mtu > SPIDER_NET_MAX_MTU) )
1289 return -EINVAL;
1290 netdev->mtu = new_mtu;
1291 return 0;
1292}
1293
1294/**
1295 * spider_net_set_mac - sets the MAC of an interface
1296 * @netdev: interface device structure
1297 * @ptr: pointer to new MAC address
1298 *
1299 * Returns 0 on success, <0 on failure.
1301 */
1302static int
1303spider_net_set_mac(struct net_device *netdev, void *p)
1304{
1305 struct spider_net_card *card = netdev_priv(netdev);
054034db 1306 u32 macl, macu, regvalue;
1307 struct sockaddr *addr = p;
1308
1309 if (!is_valid_ether_addr(addr->sa_data))
1310 return -EADDRNOTAVAIL;
1311
1312 /* switch off GMACTPE and GMACRPE */
1313 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1314 regvalue &= ~((1 << 5) | (1 << 6));
1315 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1316
1317 /* write mac */
1318 macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
1319 (addr->sa_data[2]<<8) + (addr->sa_data[3]);
1320 macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
1321 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1322 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1323
1324 /* switch GMACTPE and GMACRPE back on */
1325 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1326 regvalue |= ((1 << 5) | (1 << 6));
1327 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1328
1329 spider_net_set_promisc(card);
1330
1331 /* look up, whether we have been successful */
1332 if (spider_net_get_mac_address(netdev))
1333 return -EADDRNOTAVAIL;
1334 if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
1335 return -EADDRNOTAVAIL;
1336
1337 return 0;
1338}
1339
1340/**
1341 * spider_net_link_reset
1342 * @netdev: net device structure
1343 *
1344 * This is called when the PHY_LINK signal is asserted. For the blade this is
1345 * not connected so we should never get here.
1346 *
1347 */
1348static void
1349spider_net_link_reset(struct net_device *netdev)
1350{
1351
1352 struct spider_net_card *card = netdev_priv(netdev);
1353
1354 del_timer_sync(&card->aneg_timer);
1355
1356 /* clear interrupt, block further interrupts */
1357 spider_net_write_reg(card, SPIDER_NET_GMACST,
1358 spider_net_read_reg(card, SPIDER_NET_GMACST));
1359 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1360
1361 /* reset phy and setup aneg */
1362 spider_net_setup_aneg(card);
1363 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1364
1365}
1366
1367/**
1368 * spider_net_handle_error_irq - handles errors raised by an interrupt
1369 * @card: card structure
1370 * @status_reg: interrupt status register 0 (GHIINT0STS)
1371 *
1372 * spider_net_handle_error_irq treats or ignores all error conditions
1373 * found when an interrupt is presented
1374 */
1375static void
1376spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1377{
1378 u32 error_reg1, error_reg2;
1379 u32 i;
1380 int show_error = 1;
1381
1382 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1383 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1384
1385 /* check GHIINT0STS ************************************/
1386 if (status_reg)
1387 for (i = 0; i < 32; i++)
1388 if (status_reg & (1<<i))
1389 switch (i)
1390 {
1391 /* let error_reg1 and error_reg2 evaluation decide, what to do
1392 case SPIDER_NET_PHYINT:
1393 case SPIDER_NET_GMAC2INT:
1394 case SPIDER_NET_GMAC1INT:
1395 case SPIDER_NET_GFIFOINT:
1396 case SPIDER_NET_DMACINT:
1397 case SPIDER_NET_GSYSINT:
1398 break; */
1399
1400 case SPIDER_NET_GIPSINT:
1401 show_error = 0;
1402 break;
1403
1404 case SPIDER_NET_GPWOPCMPINT:
1405 /* PHY write operation completed */
1406 show_error = 0;
1407 break;
1408 case SPIDER_NET_GPROPCMPINT:
1409 /* PHY read operation completed */
1410 /* we don't use semaphores, as we poll for the completion
1411 * of the read operation in spider_net_read_phy. Should take
1412 * about 50 us */
1413 show_error = 0;
1414 break;
1415 case SPIDER_NET_GPWFFINT:
1416 /* PHY command queue full */
1417 if (netif_msg_intr(card))
1418 pr_err("PHY write queue full\n");
1419 show_error = 0;
1420 break;
1421
1422 /* case SPIDER_NET_GRMDADRINT: not used. print a message */
1423 /* case SPIDER_NET_GRMARPINT: not used. print a message */
1424 /* case SPIDER_NET_GRMMPINT: not used. print a message */
1425
1426 case SPIDER_NET_GDTDEN0INT:
1427 /* someone has set TX_DMA_EN to 0 */
1428 show_error = 0;
1429 break;
1430
1431 case SPIDER_NET_GDDDEN0INT: /* fallthrough */
1432 case SPIDER_NET_GDCDEN0INT: /* fallthrough */
1433 case SPIDER_NET_GDBDEN0INT: /* fallthrough */
1434 case SPIDER_NET_GDADEN0INT:
1435 /* someone has set RX_DMA_EN to 0 */
1436 show_error = 0;
1437 break;
1438
1439 /* RX interrupts */
1440 case SPIDER_NET_GDDFDCINT:
1441 case SPIDER_NET_GDCFDCINT:
1442 case SPIDER_NET_GDBFDCINT:
1443 case SPIDER_NET_GDAFDCINT:
1444 /* case SPIDER_NET_GDNMINT: not used. print a message */
1445 /* case SPIDER_NET_GCNMINT: not used. print a message */
1446 /* case SPIDER_NET_GBNMINT: not used. print a message */
1447 /* case SPIDER_NET_GANMINT: not used. print a message */
1448 /* case SPIDER_NET_GRFNMINT: not used. print a message */
1449 show_error = 0;
1450 break;
1451
1452 /* TX interrupts */
1453 case SPIDER_NET_GDTFDCINT:
1454 show_error = 0;
1455 break;
1456 case SPIDER_NET_GTTEDINT:
1457 show_error = 0;
1458 break;
1459 case SPIDER_NET_GDTDCEINT:
1460 /* chain end. If a descriptor should be sent, kick off
1461 * tx dma
98b9040c 1462 if (card->tx_chain.tail != card->tx_chain.head)
aaec0fab 1463 spider_net_kick_tx_dma(card);
1464 */
1465 show_error = 0;
1466 break;
1467
1468 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1469 /* case SPIDER_NET_GFREECNTINT: not used. print a message */
1470 }
1471
1472 /* check GHIINT1STS ************************************/
1473 if (error_reg1)
1474 for (i = 0; i < 32; i++)
1475 if (error_reg1 & (1<<i))
1476 switch (i)
1477 {
1478 case SPIDER_NET_GTMFLLINT:
1479 /* TX RAM full may happen on a usual case.
1480 * Logging is not needed. */
1481 show_error = 0;
1482 break;
1483 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
1484 case SPIDER_NET_GRFCFLLINT: /* fallthrough */
1485 case SPIDER_NET_GRFBFLLINT: /* fallthrough */
1486 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
aaec0fab 1487 case SPIDER_NET_GRMFLLINT:
4c4bd5a9 1488 /* Could happen when rx chain is full */
1489 if (card->ignore_rx_ramfull == 0) {
1490 card->ignore_rx_ramfull = 1;
1491 spider_net_resync_head_ptr(card);
1492 spider_net_refill_rx_chain(card);
1493 spider_net_enable_rxdmac(card);
1494 card->num_rx_ints ++;
1495 netif_rx_schedule(card->netdev);
1496 }
11f1a52b 1497 show_error = 0;
1498 break;
1499
1500 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
1501 case SPIDER_NET_GDTINVDINT:
1502 /* allrighty. tx from previous descr ok */
1503 show_error = 0;
1504 break;
1505
1506 /* chain end */
1507 case SPIDER_NET_GDDDCEINT: /* fallthrough */
1508 case SPIDER_NET_GDCDCEINT: /* fallthrough */
1509 case SPIDER_NET_GDBDCEINT: /* fallthrough */
1510 case SPIDER_NET_GDADCEINT:
4c4bd5a9 1511 spider_net_resync_head_ptr(card);
aaec0fab 1512 spider_net_refill_rx_chain(card);
11f1a52b 1513 spider_net_enable_rxdmac(card);
1514 card->num_rx_ints ++;
1515 netif_rx_schedule(card->netdev);
1516 show_error = 0;
1517 break;
1518
1519 /* invalid descriptor */
1520 case SPIDER_NET_GDDINVDINT: /* fallthrough */
1521 case SPIDER_NET_GDCINVDINT: /* fallthrough */
1522 case SPIDER_NET_GDBINVDINT: /* fallthrough */
1523 case SPIDER_NET_GDAINVDINT:
1524 /* Could happen when rx chain is full */
1525 spider_net_resync_head_ptr(card);
aaec0fab 1526 spider_net_refill_rx_chain(card);
11f1a52b 1527 spider_net_enable_rxdmac(card);
1528 card->num_rx_ints ++;
1529 netif_rx_schedule(card->netdev);
1530 show_error = 0;
1531 break;
1532
1533 /* case SPIDER_NET_GDTRSERINT: problem, print a message */
1534 /* case SPIDER_NET_GDDRSERINT: problem, print a message */
1535 /* case SPIDER_NET_GDCRSERINT: problem, print a message */
1536 /* case SPIDER_NET_GDBRSERINT: problem, print a message */
1537 /* case SPIDER_NET_GDARSERINT: problem, print a message */
1538 /* case SPIDER_NET_GDSERINT: problem, print a message */
1539 /* case SPIDER_NET_GDTPTERINT: problem, print a message */
1540 /* case SPIDER_NET_GDDPTERINT: problem, print a message */
1541 /* case SPIDER_NET_GDCPTERINT: problem, print a message */
1542 /* case SPIDER_NET_GDBPTERINT: problem, print a message */
1543 /* case SPIDER_NET_GDAPTERINT: problem, print a message */
1544 default:
1545 show_error = 1;
1546 break;
1547 }
1548
1549 /* check GHIINT2STS ************************************/
1550 if (error_reg2)
1551 for (i = 0; i < 32; i++)
1552 if (error_reg2 & (1<<i))
1553 switch (i)
1554 {
1555 /* there is nothing we can (want to) do at this time. Log a
1556 * message, we can switch on and off the specific values later on
1557 case SPIDER_NET_GPROPERINT:
1558 case SPIDER_NET_GMCTCRSNGINT:
1559 case SPIDER_NET_GMCTLCOLINT:
1560 case SPIDER_NET_GMCTTMOTINT:
1561 case SPIDER_NET_GMCRCAERINT:
1562 case SPIDER_NET_GMCRCALERINT:
1563 case SPIDER_NET_GMCRALNERINT:
1564 case SPIDER_NET_GMCROVRINT:
1565 case SPIDER_NET_GMCRRNTINT:
1566 case SPIDER_NET_GMCRRXERINT:
1567 case SPIDER_NET_GTITCSERINT:
1568 case SPIDER_NET_GTIFMTERINT:
1569 case SPIDER_NET_GTIPKTRVKINT:
1570 case SPIDER_NET_GTISPINGINT:
1571 case SPIDER_NET_GTISADNGINT:
1572 case SPIDER_NET_GTISPDNGINT:
1573 case SPIDER_NET_GRIFMTERINT:
1574 case SPIDER_NET_GRIPKTRVKINT:
1575 case SPIDER_NET_GRISPINGINT:
1576 case SPIDER_NET_GRISADNGINT:
1577 case SPIDER_NET_GRISPDNGINT:
1578 break;
1579 */
1580 default:
1581 break;
1582 }
1583
5a028877 1584 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
98b9040c 1585 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
aaec0fab 1586 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
98b9040c 1587 card->netdev->name,
1588 status_reg, error_reg1, error_reg2);
1589
1590 /* clear interrupt sources */
1591 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1592 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1593}
1594
1595/**
1596 * spider_net_interrupt - interrupt handler for spider_net
1597 * @irq: interrupt number
1598 * @ptr: pointer to net_device
1600 *
1601 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1602 * interrupt found raised by card.
1603 *
1604 * This is the interrupt handler, that turns off
1605 * interrupts for this device and makes the stack poll the driver
1606 */
1607static irqreturn_t
7d12e780 1608spider_net_interrupt(int irq, void *ptr)
1609{
1610 struct net_device *netdev = ptr;
1611 struct spider_net_card *card = netdev_priv(netdev);
1612 u32 status_reg;
1613
1614 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1615
1616 if (!status_reg)
1617 return IRQ_NONE;
1618
1619 if (status_reg & SPIDER_NET_RXINT ) {
1620 spider_net_rx_irq_off(card);
1621 netif_rx_schedule(netdev);
4c4bd5a9 1622 card->num_rx_ints ++;
aaec0fab 1623 }
1624 if (status_reg & SPIDER_NET_TXINT)
1625 netif_rx_schedule(netdev);
aaec0fab 1626
1627 if (status_reg & SPIDER_NET_LINKINT)
1628 spider_net_link_reset(netdev);
1629
1630 if (status_reg & SPIDER_NET_ERRINT )
1631 spider_net_handle_error_irq(card, status_reg);
1632
1633 /* clear interrupt sources */
1634 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1635
1636 return IRQ_HANDLED;
1637}
1638
1639#ifdef CONFIG_NET_POLL_CONTROLLER
1640/**
1641 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1642 * @netdev: interface device structure
1643 *
1644 * see Documentation/networking/netconsole.txt
1645 */
1646static void
1647spider_net_poll_controller(struct net_device *netdev)
1648{
1649 disable_irq(netdev->irq);
7d12e780 1650 spider_net_interrupt(netdev->irq, netdev);
1651 enable_irq(netdev->irq);
1652}
1653#endif /* CONFIG_NET_POLL_CONTROLLER */
1654
1655/**
1656 * spider_net_init_card - initializes the card
1657 * @card: card structure
1658 *
1659 * spider_net_init_card initializes the card so that other registers can
1660 * be used
1661 */
1662static void
1663spider_net_init_card(struct spider_net_card *card)
1664{
1665 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1666 SPIDER_NET_CKRCTRL_STOP_VALUE);
1667
1668 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1669 SPIDER_NET_CKRCTRL_RUN_VALUE);
1670
1671 /* trigger ETOMOD signal */
1672 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1673 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1674
1675}
1676
1677/**
1678 * spider_net_enable_card - enables the card by setting all kinds of regs
1679 * @card: card structure
1680 *
1681 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1682 */
1683static void
1684spider_net_enable_card(struct spider_net_card *card)
1685{
1686 int i;
1687 /* the following array consists of (register),(value) pairs
1688 * that are set in this function. A register of 0 ends the list */
1689 u32 regs[][2] = {
1690 { SPIDER_NET_GRESUMINTNUM, 0 },
1691 { SPIDER_NET_GREINTNUM, 0 },
1692
1693 /* set interrupt frame number registers */
1694 /* clear the single DMA engine registers first */
1695 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1696 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1697 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1698 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1699 /* then set what we really need */
1700 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1701
1702 /* timer counter registers and stuff */
1703 { SPIDER_NET_GFREECNNUM, 0 },
1704 { SPIDER_NET_GONETIMENUM, 0 },
1705 { SPIDER_NET_GTOUTFRMNUM, 0 },
1706
1707 /* RX mode setting */
1708 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1709 /* TX mode setting */
1710 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1711 /* IPSEC mode setting */
1712 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1713
1714 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1715
1716 { SPIDER_NET_GMRWOLCTRL, 0 },
b636d17a
JO
1717 { SPIDER_NET_GTESTMD, 0x10000000 },
1718 { SPIDER_NET_GTTQMSK, 0x00400040 },
aaec0fab
JO
1719
1720 { SPIDER_NET_GMACINTEN, 0 },
1721
1722 /* flow control stuff */
1723 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1724 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1725
1726 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1727 { 0, 0}
1728 };
1729
1730 i = 0;
1731 while (regs[i][0]) {
1732 spider_net_write_reg(card, regs[i][0], regs[i][1]);
1733 i++;
1734 }
1735
1736 /* clear unicast filter table entries 1 to 14 */
1737 for (i = 1; i <= 14; i++) {
1738 spider_net_write_reg(card,
1739 SPIDER_NET_GMRUAFILnR + i * 8,
1740 0x00080000);
1741 spider_net_write_reg(card,
1742 SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1743 0x00000000);
1744 }
1745
1746 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1747
1748 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1749
1750 /* set chain tail address for RX chains and
1751 * enable DMA */
1752 spider_net_enable_rxchtails(card);
1753 spider_net_enable_rxdmac(card);
1754
1755 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1756
aaec0fab
JO
1757 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1758 SPIDER_NET_LENLMT_VALUE);
aaec0fab
JO
1759 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1760 SPIDER_NET_OPMODE_VALUE);
1761
1762 /* set interrupt mask registers */
1763 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1764 SPIDER_NET_INT0_MASK_VALUE);
1765 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1766 SPIDER_NET_INT1_MASK_VALUE);
1767 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1768 SPIDER_NET_INT2_MASK_VALUE);
bdd01503
JO
1769
1770 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
7bd54c86 1771 SPIDER_NET_GDTBSTA);
aaec0fab
JO
1772}
1773
3cf761dd
KI
1774/**
1775 * spider_net_download_firmware - loads firmware into the adapter
1776 * @card: card structure
1777 * @firmware_ptr: pointer to firmware data
1778 *
1779 * spider_net_download_firmware loads the firmware data into the
1780 * adapter. It assumes the length etc. have already been checked.
1781 */
1782static int
1783spider_net_download_firmware(struct spider_net_card *card,
1784 const void *firmware_ptr)
1785{
1786 int sequencer, i;
1787 const u32 *fw_ptr = firmware_ptr;
1788
1789 /* stop sequencers */
1790 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1791 SPIDER_NET_STOP_SEQ_VALUE);
1792
1793 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1794 sequencer++) {
1795 spider_net_write_reg(card,
1796 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1797 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1798 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1799 sequencer * 8, *fw_ptr);
1800 fw_ptr++;
1801 }
1802 }
1803
1804 if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1805 return -EIO;
1806
1807 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1808 SPIDER_NET_RUN_SEQ_VALUE);
1809
1810 return 0;
1811}
1812
1813/**
1814 * spider_net_init_firmware - reads in firmware parts
1815 * @card: card structure
1816 *
1817 * Returns 0 on success, <0 on failure
1818 *
1819 * spider_net_init_firmware requests the sequencer firmware and does some
1820 * basic size checks. The firmware is downloaded to the adapter and the
1821 * firmware structure is released again before returning.
1822 *
1823 * Firmware format
1824 * ===============
1825 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1826 * the program for each sequencer. Use the command
1827 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1828 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1829 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1830 *
1831 * to generate spider_fw.bin, if you have sequencer programs with something
1832 * like the following contents for each sequencer:
1833 * <ONE LINE COMMENT>
1834 * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1835 * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1836 * ...
1837 * <1024th 4-BYTES-WORD FOR SEQUENCER>
1838 */
1839static int
1840spider_net_init_firmware(struct spider_net_card *card)
1841{
1842 struct firmware *firmware = NULL;
1843 struct device_node *dn;
1844 const u8 *fw_prop = NULL;
1845 int err = -ENOENT;
1846 int fw_size;
1847
1848 if (request_firmware((const struct firmware **)&firmware,
1849 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1850 if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
1851 if (netif_msg_probe(card))
1852 pr_err("Incorrect size of spidernet firmware in " \
1853 "filesystem. Looking in host firmware...\n");
1854 goto try_host_fw;
1855 }
1856 err = spider_net_download_firmware(card, firmware->data);
1857
1858 release_firmware(firmware);
1859 if (err)
1860 goto try_host_fw;
1861
1862 goto done;
1863 }
1864
1865try_host_fw:
1866 dn = pci_device_to_OF_node(card->pdev);
1867 if (!dn)
1868 goto out_err;
1869
40cd3a45 1870 fw_prop = of_get_property(dn, "firmware", &fw_size);
3cf761dd
KI
1871 if (!fw_prop)
1872 goto out_err;
1873
1874 if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
1875 if (netif_msg_probe(card))
1876 pr_err("Incorrect size of spidernet firmware in " \
1877 "host firmware\n");
1878 goto done;
1879 }
1880
1881 err = spider_net_download_firmware(card, fw_prop);
1882
1883done:
1884 return err;
1885out_err:
1886 if (netif_msg_probe(card))
1887 pr_err("Couldn't find spidernet firmware in filesystem " \
1888 "or host firmware\n");
1889 return err;
1890}
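/*
 * Illustrative sketch (not part of the original driver): per the firmware
 * format description above, a valid blob consists of
 * SPIDER_NET_FIRMWARE_SEQS sequencer programs of
 * SPIDER_NET_FIRMWARE_SEQWORDS 32-bit words each (6*1024*4 bytes), which
 * is the size the checks above compare against SPIDER_NET_FIRMWARE_LEN.
 * A hypothetical helper expressing that relation:
 */
static inline int
spider_net_fw_size_valid(size_t size)
{
	/* expected size: number of sequencers * words per sequencer * 4 */
	return size == SPIDER_NET_FIRMWARE_SEQS *
		       SPIDER_NET_FIRMWARE_SEQWORDS * sizeof(u32);
}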
1891
aaec0fab
JO
1892/**
1893 * spider_net_open - called upon ifconfig up
1894 * @netdev: interface device structure
1895 *
1896 * returns 0 on success, <0 on failure
1897 *
1898 * spider_net_open allocates all the descriptors and memory needed for
1899 * operation, sets up multicast list and enables interrupts
1900 */
1901int
1902spider_net_open(struct net_device *netdev)
1903{
1904 struct spider_net_card *card = netdev_priv(netdev);
d4ed8f8d 1905 int result;
aaec0fab 1906
3cf761dd
KI
1907 result = spider_net_init_firmware(card);
1908 if (result)
1909 goto init_firmware_failed;
1910
abdb66b5
KI
1911 /* start probing with copper */
1912 spider_net_setup_aneg(card);
1913 if (card->phy.def->phy_id)
1914 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1915
d4ed8f8d
LV
1916 result = spider_net_init_chain(card, &card->tx_chain);
1917 if (result)
aaec0fab 1918 goto alloc_tx_failed;
204e5fa1
LV
1919 card->low_watermark = NULL;
1920
d4ed8f8d
LV
1921 result = spider_net_init_chain(card, &card->rx_chain);
1922 if (result)
aaec0fab
JO
1923 goto alloc_rx_failed;
1924
d4ed8f8d 1925 /* Allocate rx skbs */
aaec0fab
JO
1926 if (spider_net_alloc_rx_skbs(card))
1927 goto alloc_skbs_failed;
1928
1929 spider_net_set_multi(netdev);
1930
1931 /* further enhancement: setup hw vlan, if needed */
1932
1933 result = -EBUSY;
1934 if (request_irq(netdev->irq, spider_net_interrupt,
1fb9df5d 1935 IRQF_SHARED, netdev->name, netdev))
aaec0fab
JO
1936 goto register_int_failed;
1937
1938 spider_net_enable_card(card);
1939
543cec51
JO
1940 netif_start_queue(netdev);
1941 netif_carrier_on(netdev);
1942 netif_poll_enable(netdev);
1943
aaec0fab
JO
1944 return 0;
1945
1946register_int_failed:
1947 spider_net_free_rx_chain_contents(card);
1948alloc_skbs_failed:
1949 spider_net_free_chain(card, &card->rx_chain);
1950alloc_rx_failed:
1951 spider_net_free_chain(card, &card->tx_chain);
1952alloc_tx_failed:
abdb66b5 1953 del_timer_sync(&card->aneg_timer);
3cf761dd 1954init_firmware_failed:
aaec0fab
JO
1955 return result;
1956}
1957
abdb66b5
KI
1958/**
1959 * spider_net_link_phy - timer callback for link polling and autoneg fallback
1960 * @data: pointer to the card structure, passed as unsigned long
1961 *
1962 */
1963static void spider_net_link_phy(unsigned long data)
1964{
1965 struct spider_net_card *card = (struct spider_net_card *)data;
1966 struct mii_phy *phy = &card->phy;
1967
1968 /* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the PHY again */
1969 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1970
1971 pr_info("%s: link is down, trying to bring it up\n", card->netdev->name);
1972
4b23a554
JO
1973 switch (card->medium) {
1974 case BCM54XX_COPPER:
abdb66b5
KI
1975 /* enable fiber with autonegotiation first */
1976 if (phy->def->ops->enable_fiber)
1977 phy->def->ops->enable_fiber(phy, 1);
4b23a554 1978 card->medium = BCM54XX_FIBER;
abdb66b5
KI
1979 break;
1980
4b23a554 1981 case BCM54XX_FIBER:
abdb66b5
KI
1982 /* fiber didn't come up, try to disable fiber autoneg */
1983 if (phy->def->ops->enable_fiber)
1984 phy->def->ops->enable_fiber(phy, 0);
4b23a554 1985 card->medium = BCM54XX_UNKNOWN;
abdb66b5
KI
1986 break;
1987
4b23a554 1988 case BCM54XX_UNKNOWN:
abdb66b5
KI
1989 /* copper, and fiber with and without autoneg, all failed;
1990 * retry from the beginning */
1991 spider_net_setup_aneg(card);
4b23a554 1992 card->medium = BCM54XX_COPPER;
abdb66b5
KI
1993 break;
1994 }
1995
1996 card->aneg_count = 0;
1997 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1998 return;
1999 }
2000
2001 /* link still not up, try again later */
2002 if (!(phy->def->ops->poll_link(phy))) {
2003 card->aneg_count++;
2004 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2005 return;
2006 }
2007
2008 /* link came up, get abilities */
2009 phy->def->ops->read_link(phy);
2010
2011 spider_net_write_reg(card, SPIDER_NET_GMACST,
2012 spider_net_read_reg(card, SPIDER_NET_GMACST));
2013 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
2014
2015 if (phy->speed == 1000)
2016 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
2017 else
2018 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
2019
2020 card->aneg_count = 0;
2021
2022 pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
2023 phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half",
2024 phy->autoneg==1 ? "" : "no ");
2025
2026 return;
2027}
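/*
 * Note: the timer callback above implements a simple medium fallback state
 * machine.  Whenever SPIDER_NET_ANEG_TIMEOUT polls pass without link, it
 * advances BCM54XX_COPPER -> BCM54XX_FIBER (fiber with autonegotiation) ->
 * BCM54XX_UNKNOWN (fiber without autonegotiation) and then restarts
 * autonegotiation from copper via spider_net_setup_aneg().
 */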
2028
aaec0fab
JO
2029/**
2030 * spider_net_setup_phy - setup PHY
2031 * @card: card structure
2032 *
2033 * returns 0 on success, <0 on failure
2034 *
abdb66b5 2035 * spider_net_setup_phy is used as part of spider_net_probe.
aaec0fab
JO
2036 **/
2037static int
2038spider_net_setup_phy(struct spider_net_card *card)
2039{
2040 struct mii_phy *phy = &card->phy;
2041
2042 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2043 SPIDER_NET_DMASEL_VALUE);
2044 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2045 SPIDER_NET_PHY_CTRL_VALUE);
abdb66b5 2046
aaec0fab
JO
2047 phy->dev = card->netdev;
2048 phy->mdio_read = spider_net_read_phy;
2049 phy->mdio_write = spider_net_write_phy;
2050
abdb66b5
KI
2051 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2052 unsigned short id;
2053 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2054 if (id != 0x0000 && id != 0xffff) {
2055 if (!mii_phy_probe(phy, phy->mii_id)) {
2056 pr_info("Found %s.\n", phy->def->name);
2057 break;
2058 }
2059 }
2060 }
aaec0fab
JO
2061
2062 return 0;
2063}
2064
aaec0fab
JO
2065/**
2066 * spider_net_workaround_rxramfull - work around firmware bug
2067 * @card: card structure
2068 *
2069 * no return value
2070 **/
2071static void
2072spider_net_workaround_rxramfull(struct spider_net_card *card)
2073{
2074 int i, sequencer = 0;
2075
2076 /* cancel reset */
2077 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2078 SPIDER_NET_CKRCTRL_RUN_VALUE);
2079
2080 /* empty sequencer data */
11f1a52b
AB
2081 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
2082 sequencer++) {
ee962a5c 2083 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
aaec0fab 2084 sequencer * 8, 0x0);
11f1a52b 2085 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
aaec0fab
JO
2086 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
2087 sequencer * 8, 0x0);
2088 }
2089 }
2090
2091 /* set sequencer operation */
2092 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
2093
2094 /* reset */
2095 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2096 SPIDER_NET_CKRCTRL_STOP_VALUE);
2097}
2098
bdd01503
JO
2099/**
2100 * spider_net_stop - called upon ifconfig down
2101 * @netdev: interface device structure
2102 *
2103 * always returns 0
2104 */
2105int
2106spider_net_stop(struct net_device *netdev)
2107{
2108 struct spider_net_card *card = netdev_priv(netdev);
2109
bdd01503
JO
2110 netif_poll_disable(netdev);
2111 netif_carrier_off(netdev);
2112 netif_stop_queue(netdev);
2113 del_timer_sync(&card->tx_timer);
abdb66b5 2114 del_timer_sync(&card->aneg_timer);
bdd01503
JO
2115
2116 /* disable/mask all interrupts */
2117 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
2118 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
2119 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
abdb66b5 2120 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
bdd01503 2121
d406eafe 2122 free_irq(netdev->irq, netdev);
bdd01503
JO
2123
2124 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
2125 SPIDER_NET_DMA_TX_FEND_VALUE);
2126
2127 /* turn off DMA, force end */
2128 spider_net_disable_rxdmac(card);
2129
2130 /* release chains */
9cc7bf7e 2131 spider_net_release_tx_chain(card, 1);
d4ed8f8d 2132 spider_net_free_rx_chain_contents(card);
bdd01503
JO
2133
2134 spider_net_free_chain(card, &card->tx_chain);
2135 spider_net_free_chain(card, &card->rx_chain);
2136
2137 return 0;
2138}
2139
aaec0fab
JO
2140/**
2141 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
2142 * function (runs in process context, not in interrupt context)
2143 * @work: work item embedded in the card structure
2144 *
2145 * called as a task when tx hangs; resets the interface (if it is up)
2146 */
2147static void
c4028958 2148spider_net_tx_timeout_task(struct work_struct *work)
aaec0fab 2149{
c4028958
DH
2150 struct spider_net_card *card =
2151 container_of(work, struct spider_net_card, tx_timeout_task);
2152 struct net_device *netdev = card->netdev;
aaec0fab
JO
2153
2154 if (!(netdev->flags & IFF_UP))
2155 goto out;
2156
2157 netif_device_detach(netdev);
2158 spider_net_stop(netdev);
2159
2160 spider_net_workaround_rxramfull(card);
2161 spider_net_init_card(card);
2162
2163 if (spider_net_setup_phy(card))
2164 goto out;
aaec0fab
JO
2165
2166 spider_net_open(netdev);
bdd01503 2167 spider_net_kick_tx_dma(card);
aaec0fab
JO
2168 netif_device_attach(netdev);
2169
2170out:
2171 atomic_dec(&card->tx_timeout_task_counter);
2172}
2173
2174/**
2175 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2176 * @netdev: interface device structure
2177 *
2178 * called if tx hangs. Schedules a task that resets the interface
2179 */
2180static void
2181spider_net_tx_timeout(struct net_device *netdev)
2182{
2183 struct spider_net_card *card;
2184
2185 card = netdev_priv(netdev);
2186 atomic_inc(&card->tx_timeout_task_counter);
2187 if (netdev->flags & IFF_UP)
2188 schedule_work(&card->tx_timeout_task);
2189 else
2190 atomic_dec(&card->tx_timeout_task_counter);
9b6b0b81 2191 card->spider_stats.tx_timeouts++;
aaec0fab
JO
2192}
2193
2194/**
2195 * spider_net_setup_netdev_ops - initialization of net_device operations
2196 * @netdev: net_device structure
2197 *
2198 * fills out function pointers in the net_device structure
2199 */
2200static void
2201spider_net_setup_netdev_ops(struct net_device *netdev)
2202{
2203 netdev->open = &spider_net_open;
2204 netdev->stop = &spider_net_stop;
2205 netdev->hard_start_xmit = &spider_net_xmit;
2206 netdev->get_stats = &spider_net_get_stats;
2207 netdev->set_multicast_list = &spider_net_set_multi;
2208 netdev->set_mac_address = &spider_net_set_mac;
2209 netdev->change_mtu = &spider_net_change_mtu;
2210 netdev->do_ioctl = &spider_net_do_ioctl;
2211 /* tx watchdog */
2212 netdev->tx_timeout = &spider_net_tx_timeout;
2213 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2214 /* NAPI */
2215 netdev->poll = &spider_net_poll;
2216 netdev->weight = SPIDER_NET_NAPI_WEIGHT;
2217 /* HW VLAN */
aaec0fab
JO
2218#ifdef CONFIG_NET_POLL_CONTROLLER
2219 /* poll controller */
2220 netdev->poll_controller = &spider_net_poll_controller;
2221#endif /* CONFIG_NET_POLL_CONTROLLER */
2222 /* ethtool ops */
2223 netdev->ethtool_ops = &spider_net_ethtool_ops;
2224}
2225
2226/**
2227 * spider_net_setup_netdev - initialization of net_device
2228 * @card: card structure
2229 *
2230 * Returns 0 on success or <0 on failure
2231 *
2232 * spider_net_setup_netdev initializes the net_device structure
2233 **/
2234static int
2235spider_net_setup_netdev(struct spider_net_card *card)
2236{
2237 int result;
2238 struct net_device *netdev = card->netdev;
2239 struct device_node *dn;
2240 struct sockaddr addr;
1a2509c9 2241 const u8 *mac;
aaec0fab
JO
2242
2243 SET_MODULE_OWNER(netdev);
2244 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2245
2246 pci_set_drvdata(card->pdev, netdev);
11f1a52b 2247
11f1a52b
AB
2248 init_timer(&card->tx_timer);
2249 card->tx_timer.function =
2250 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
2251 card->tx_timer.data = (unsigned long) card;
aaec0fab
JO
2252 netdev->irq = card->pdev->irq;
2253
abdb66b5
KI
2254 card->aneg_count = 0;
2255 init_timer(&card->aneg_timer);
2256 card->aneg_timer.function = spider_net_link_phy;
2257 card->aneg_timer.data = (unsigned long) card;
2258
aaec0fab
JO
2259 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2260
2261 spider_net_setup_netdev_ops(netdev);
2262
3a2c892d 2263 netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX;
aaec0fab
JO
2264 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2265 * NETIF_F_HW_VLAN_FILTER */
2266
2267 netdev->irq = card->pdev->irq;
4c4bd5a9 2268 card->num_rx_ints = 0;
c3d1182a 2269 card->ignore_rx_ramfull = 0;
aaec0fab
JO
2270
2271 dn = pci_device_to_OF_node(card->pdev);
543cec51
JO
2272 if (!dn)
2273 return -EIO;
2274
40cd3a45 2275 mac = of_get_property(dn, "local-mac-address", NULL);
543cec51
JO
2276 if (!mac)
2277 return -EIO;
aaec0fab
JO
2278 memcpy(addr.sa_data, mac, ETH_ALEN);
2279
2280 result = spider_net_set_mac(netdev, &addr);
2281 if ((result) && (netif_msg_probe(card)))
2282 pr_err("Failed to set MAC address: %i\n", result);
2283
2284 result = register_netdev(netdev);
2285 if (result) {
2286 if (netif_msg_probe(card))
2287 pr_err("Couldn't register net_device: %i\n",
2288 result);
2289 return result;
2290 }
2291
2292 if (netif_msg_probe(card))
2293 pr_info("Initialized device %s.\n", netdev->name);
2294
2295 return 0;
2296}
2297
2298/**
2299 * spider_net_alloc_card - allocates net_device and card structure
2300 *
2301 * returns the card structure or NULL in case of errors
2302 *
2303 * the card and net_device structures are linked to each other
2304 */
2305static struct spider_net_card *
2306spider_net_alloc_card(void)
2307{
2308 struct net_device *netdev;
2309 struct spider_net_card *card;
4cb6f9e5 2310 size_t alloc_size;
aaec0fab 2311
4cb6f9e5
LV
2312 alloc_size = sizeof(struct spider_net_card) +
2313 (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
2314 netdev = alloc_etherdev(alloc_size);
aaec0fab
JO
2315 if (!netdev)
2316 return NULL;
2317
2318 card = netdev_priv(netdev);
2319 card->netdev = netdev;
2320 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
c4028958 2321 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
aaec0fab
JO
2322 init_waitqueue_head(&card->waitq);
2323 atomic_set(&card->tx_timeout_task_counter, 0);
2324
4cb6f9e5
LV
2325 card->rx_chain.num_desc = rx_descriptors;
2326 card->rx_chain.ring = card->darray;
2327 card->tx_chain.num_desc = tx_descriptors;
2328 card->tx_chain.ring = card->darray + rx_descriptors;
2329
aaec0fab
JO
2330 return card;
2331}
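/*
 * Note on the allocation above: alloc_etherdev() is asked for
 * sizeof(struct spider_net_card) plus room for all descriptors, so both
 * rings live in card->darray (presumably a trailing array member of the
 * card structure): rx_chain.ring points at darray[0] and tx_chain.ring
 * at darray[rx_descriptors].
 */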
2332
2333/**
2334 * spider_net_undo_pci_setup - releases PCI resources
2335 * @card: card structure
2336 *
2337 * spider_net_undo_pci_setup releases the mapped regions
2338 */
2339static void
2340spider_net_undo_pci_setup(struct spider_net_card *card)
2341{
2342 iounmap(card->regs);
2343 pci_release_regions(card->pdev);
2344}
2345
2346/**
2347 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2348 * @card: card structure
2349 * @pdev: PCI device
2350 *
2351 * Returns the card structure or NULL if any errors occur
2352 *
2353 * spider_net_setup_pci_dev enables and maps the PCI device; together with
2354 * the functions called from spider_net_open it configures the device so
2355 * that data can be transferred over it.
2356 * If the function returns without error, the net_device structure is
2357 * attached to the card structure.
2358 **/
2359static struct spider_net_card *
2360spider_net_setup_pci_dev(struct pci_dev *pdev)
2361{
2362 struct spider_net_card *card;
2363 unsigned long mmio_start, mmio_len;
2364
2365 if (pci_enable_device(pdev)) {
2366 pr_err("Couldn't enable PCI device\n");
2367 return NULL;
2368 }
2369
2370 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2371 pr_err("Couldn't find proper PCI device base address.\n");
2372 goto out_disable_dev;
2373 }
2374
2375 if (pci_request_regions(pdev, spider_net_driver_name)) {
2376 pr_err("Couldn't obtain PCI resources, aborting.\n");
2377 goto out_disable_dev;
2378 }
2379
2380 pci_set_master(pdev);
2381
2382 card = spider_net_alloc_card();
2383 if (!card) {
2384 pr_err("Couldn't allocate net_device structure, "
2385 "aborting.\n");
2386 goto out_release_regions;
2387 }
2388 card->pdev = pdev;
2389
2390 /* fetch base address and length of first resource */
2391 mmio_start = pci_resource_start(pdev, 0);
2392 mmio_len = pci_resource_len(pdev, 0);
2393
2394 card->netdev->mem_start = mmio_start;
2395 card->netdev->mem_end = mmio_start + mmio_len;
2396 card->regs = ioremap(mmio_start, mmio_len);
2397
2398 if (!card->regs) {
2399 pr_err("Couldn't obtain PCI resources, aborting.\n");
2400 goto out_release_regions;
2401 }
2402
2403 return card;
2404
2405out_release_regions:
2406 pci_release_regions(pdev);
2407out_disable_dev:
2408 pci_disable_device(pdev);
2409 pci_set_drvdata(pdev, NULL);
2410 return NULL;
2411}
2412
2413/**
2414 * spider_net_probe - initialization of a device
2415 * @pdev: PCI device
2416 * @ent: entry in the device id list
2417 *
2418 * Returns 0 on success, <0 on failure
2419 *
2420 * spider_net_probe initializes pdev and registers a net_device
2421 * structure for it. After that, the device can be ifconfig'ed up
2422 **/
2423static int __devinit
2424spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2425{
2426 int err = -EIO;
2427 struct spider_net_card *card;
2428
2429 card = spider_net_setup_pci_dev(pdev);
2430 if (!card)
2431 goto out;
2432
2433 spider_net_workaround_rxramfull(card);
2434 spider_net_init_card(card);
2435
2436 err = spider_net_setup_phy(card);
2437 if (err)
2438 goto out_undo_pci;
2439
aaec0fab
JO
2440 err = spider_net_setup_netdev(card);
2441 if (err)
2442 goto out_undo_pci;
2443
2444 return 0;
2445
2446out_undo_pci:
2447 spider_net_undo_pci_setup(card);
2448 free_netdev(card->netdev);
2449out:
2450 return err;
2451}
2452
2453/**
2454 * spider_net_remove - removal of a device
2455 * @pdev: PCI device
2456 *
2457 * No return value
2458 *
2459 * spider_net_remove is called to remove the device; it unregisters the
2460 * net_device and releases the PCI and memory resources
2461 **/
2462static void __devexit
2463spider_net_remove(struct pci_dev *pdev)
2464{
2465 struct net_device *netdev;
2466 struct spider_net_card *card;
2467
2468 netdev = pci_get_drvdata(pdev);
2469 card = netdev_priv(netdev);
2470
2471 wait_event(card->waitq,
2472 atomic_read(&card->tx_timeout_task_counter) == 0);
2473
2474 unregister_netdev(netdev);
543cec51
JO
2475
2476 /* switch off card */
2477 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2478 SPIDER_NET_CKRCTRL_STOP_VALUE);
2479 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2480 SPIDER_NET_CKRCTRL_RUN_VALUE);
2481
aaec0fab
JO
2482 spider_net_undo_pci_setup(card);
2483 free_netdev(netdev);
aaec0fab
JO
2484}
2485
2486static struct pci_driver spider_net_driver = {
aaec0fab
JO
2487 .name = spider_net_driver_name,
2488 .id_table = spider_net_pci_tbl,
2489 .probe = spider_net_probe,
2490 .remove = __devexit_p(spider_net_remove)
2491};
2492
2493/**
2494 * spider_net_init - init function when the driver is loaded
2495 *
2496 * spider_net_init registers the device driver
2497 */
2498static int __init spider_net_init(void)
2499{
90f10841
LV
2500 printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2501
aaec0fab
JO
2502 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2503 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2504 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2505 }
2506 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2507 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2508 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2509 }
2510 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2511 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2512 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2513 }
2514 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2515 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2516 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2517 }
2518
2519 return pci_register_driver(&spider_net_driver);
2520}
2521
2522/**
2523 * spider_net_cleanup - exit function when driver is unloaded
2524 *
2525 * spider_net_cleanup unregisters the device driver
2526 */
2527static void __exit spider_net_cleanup(void)
2528{
2529 pci_unregister_driver(&spider_net_driver);
2530}
2531
2532module_init(spider_net_init);
2533module_exit(spider_net_cleanup);