/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.01"
#define DRV_MODULE_RELDATE	"Jun 16, 2006"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)
#define B44_DMA_MASK 0x3fffffff
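
/* Editor's note (not in the original source): the 0x3fffffff mask encodes
 * a hardware erratum -- the 4400 core can only DMA within the first 1 GB
 * of the address space.  Every mapping made below is therefore checked
 * against B44_DMA_MASK and, when it lands too high, redone from GFP_DMA
 * memory or bounced through a GFP_DMA skb.
 */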

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
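
/* Worked example (editor's note): with B44_TX_RING_SIZE = 512 and the
 * default tx_pending of 511, TX_RING_GAP() is 1.  If tx_cons = 10 and
 * tx_prod = 500, the first branch of TX_BUFFS_AVAIL() applies:
 * 10 + 511 - 500 = 21 descriptors are still free.  NEXT_TX() relies on
 * the ring size being a power of two so it can wrap with a simple mask.
 */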

#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}
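
/* Typical b44_wait_bit() usage, taken from b44_chip_reset() below: poll
 * until the chip acknowledges the disable request, i.e. until the bit
 * reads back clear, giving up after 100 iterations (~1 ms):
 *
 *	b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
 */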

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
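
/* Editor's note: SSB_BAR0_WIN slides the BAR0 aperture over the Sonics
 * backplane.  The helper above saves the current window, retargets it at
 * the PCI core (BCM4400_PCI_CORE_ADDR) so that br32()/bw32() reach that
 * core's registers, and restores the saved window before returning.
 */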

static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
	u32 val = br32(bp, B44_SBADMATCH0);
	u32 type, base;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;

	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;

	case 2:
	default:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	}
#endif
	return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
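
/* Editor's sketch of the CAM packing above: a MAC address aa:bb:cc:dd:ee:ff
 * is written as CAM_DATA_LO = 0xccddeeff and CAM_DATA_HI = valid-bit |
 * 0x0000aabb, then latched into the slot selected via CAM_CTRL_INDEX_SHIFT
 * once CAM_CTRL_BUSY clears.
 */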

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
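
/* Editor's note: both PHY accessors above assemble a single clause-22
 * MDIO frame in B44_MDIO_DATA (start bits, opcode, PHY address, register
 * address, turnaround, data) and then use b44_wait_bit() on EMAC_INT_MII
 * to learn when the EMAC has finished shifting the frame out.
 */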

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
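
/* Editor's note restating the policy above: RX pause is negotiated only
 * when we advertise both ADVERTISE_PAUSE_CAP and ADVERTISE_PAUSE_ASYM
 * while the partner advertises LPA_PAUSE_ASYM without LPA_PAUSE_CAP;
 * every other combination leaves both directions off until the user
 * enables TX pause explicitly through ethtool.
 */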

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}
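
/* Editor's note: the loops above assume the u32 counters in struct
 * b44_hw_stats are declared in exactly the order of the hardware MIB
 * registers (see B44_STAT_REG_DECLARE in b44.h), which is what lets
 * 'val' walk the struct as a flat array while 'reg' walks the chip.
 */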

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
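/* Resulting buffer layout (editor's sketch, rx_offset == 30):
 *
 *   mapping                       mapping + bp->rx_offset
 *      |<-- struct rx_header ---->|<-- packet data ...
 *   skb->head                     skb->data after skb_reserve()
 *
 * The descriptor below is pointed at (mapping + rx_offset), so the chip
 * deposits the rx_header at the start of the buffer and the frame where
 * the stack expects it.
 */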
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(mapping) ||
	    mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
		/* Sigh... */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = pci_map_single(bp->pdev, skb->data,
					 RX_PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(mapping) ||
		    mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	skb->dev = bp->dev;
	skb_reserve(skb, bp->rx_offset);

	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(struct dma_desc),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
					  src_idx * sizeof(struct dma_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(struct dma_desc),
					     DMA_BIDIRECTIONAL);

	pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
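
/* Editor's note on the RX path above: frames longer than RX_COPY_THRESHOLD
 * keep their buffer and hand the ring a freshly allocated replacement
 * (b44_alloc_rx_skb), while short frames are memcpy'd into a small skb and
 * the original buffer is put straight back via b44_recycle_rx(), trading a
 * copy for an allocation on small packets.
 */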

static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, 1);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* ??? What the fuck is the purpose of the interrupt mask
	 * ??? register if we have to mask it out by hand anyways?
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, 1);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
					     GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping,
						 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, 1);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + size > B44_DMA_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, GFP_KERNEL);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(tx_ring_dma) ||
		    tx_ring_dma + size > B44_DMA_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int full_reset)
{
	u32 val;

	b44_chip_reset(bp);
	if (full_reset) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (full_reset) {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	} else {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
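
/* Editor's note: full_reset == 1 is the normal bring-up (PHY reset plus
 * reprogramming of both DMA engines); the full_reset == 0 variant is used
 * only on the WOL path in b44_close(), where just the RX engine needs to
 * stay alive for pattern matching.
 */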

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		goto out;

	b44_init_rings(bp);
	b44_init_hw(bp, 1);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (unlikely(err < 0)) {
		b44_chip_reset(bp);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
1468
725ad800
GZ
1469static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1470{
1471 u32 i;
1472 u32 *pattern = (u32 *) pp;
1473
1474 for (i = 0; i < bytes; i += sizeof(u32)) {
1475 bw32(bp, B44_FILT_ADDR, table_offset + i);
1476 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1477 }
1478}
1479
1480static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1481{
1482 int magicsync = 6;
1483 int k, j, len = offset;
1484 int ethaddr_bytes = ETH_ALEN;
1485
1486 memset(ppattern + offset, 0xff, magicsync);
1487 for (j = 0; j < magicsync; j++)
1488 set_bit(len++, (unsigned long *) pmask);
1489
1490 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1491 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1492 ethaddr_bytes = ETH_ALEN;
1493 else
1494 ethaddr_bytes = B44_PATTERN_SIZE - len;
1495 if (ethaddr_bytes <=0)
1496 break;
1497 for (k = 0; k< ethaddr_bytes; k++) {
1498 ppattern[offset + magicsync +
1499 (j * ETH_ALEN) + k] = macaddr[k];
1500 len++;
1501 set_bit(len, (unsigned long *) pmask);
1502 }
1503 }
1504 return len - 1;
1505}
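
/* Editor's sketch of the pattern built above: starting at 'offset' the
 * buffer holds the standard magic-packet body -- six 0xff sync bytes
 * followed by as many repetitions of the interface MAC address as fit in
 * B44_PATTERN_SIZE -- while pmask gets one bit set for every byte the
 * chip must actually compare.
 */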

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* IPv4 magic packet pattern - pattern 0. */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* IPv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;
	u16 pmval;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
	} else {
		b44_setup_pseudo_magicp(bp);
	}

	val = br32(bp, B44_SBTMSLOW);
	bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

	pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
	pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, 0);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes = hwstat->rx_octets;
	nstat->tx_bytes = hwstat->tx_octets;
	nstat->tx_errors = (hwstat->tx_jabber_pkts +
			    hwstat->tx_oversize_pkts +
			    hwstat->tx_underruns +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_late_cols);
	nstat->multicast = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors = hwstat->rx_align_errs;
	nstat->rx_crc_errors = hwstat->rx_crc_errs;
	nstat->rx_errors = (hwstat->rx_jabber_pkts +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_missed_pkts +
			    hwstat->rx_crc_align_errs +
			    hwstat->rx_undersize +
			    hwstat->rx_crc_errs +
			    hwstat->rx_align_errs +
			    hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 0;

		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++) {
			__b44_cam_write(bp, zero, i);
		}
		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, 1);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, 1);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(b44_gstrings);
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}
2014
static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_stats_count	= b44_get_stats_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

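/* The chip makes the EEPROM contents visible in its register window at
 * offset 4096, so reading them back is just 64 16-bit MMIO reads.
 */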
/* Read 128 bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = readw(bp->regs + 4096 + i);

	return 0;
}

static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

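	/* The station address is stored byte-swapped within each 16-bit
	 * EEPROM word, hence the pairwise 79/78, 81/80, 83/82 ordering.
	 */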
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (ssb_get_core_rev(bp) >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

out:
	return err;
}

static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

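	/* B44_DMA_MASK is 0x3fffffff: the 4400 core can only generate
	 * 30-bit DMA addresses, so both the streaming and coherent masks
	 * are capped at the low 1GB.
	 */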
	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == NULL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(bp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

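/* Suspend fully quiesces the device.  If wake-on-LAN was enabled via
 * ethtool, the hardware is then re-initialized, without a full reset,
 * just enough to arm the magic-packet match logic before power-down.
 */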
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, 0);
		b44_setup_wol(bp);
	}
	pci_disable_device(pdev);
	return 0;
}

static int b44_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, 1);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_wake_queue(dev);
	return 0;
}

static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

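/* The DMA descriptor sync parameters are derived from the CPU cache
 * line size: dma_desc_align_mask rounds a descriptor address down to a
 * cache-line boundary, and dma_desc_sync_size is at least one whole
 * struct dma_desc so a sync never covers only part of a descriptor.
 */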
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size,
				   sizeof(struct dma_desc));

	return pci_module_init(&b44_driver);
}

static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);