1/* b44.c: Broadcom 4400 device driver.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
6 *
7 * Distribute under GPL.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/types.h>
14#include <linux/netdevice.h>
15#include <linux/ethtool.h>
16#include <linux/mii.h>
17#include <linux/if_ether.h>
18#include <linux/etherdevice.h>
19#include <linux/pci.h>
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/dma-mapping.h>
23
24#include <asm/uaccess.h>
25#include <asm/io.h>
26#include <asm/irq.h>
27
28#include "b44.h"
29
30#define DRV_MODULE_NAME "b44"
31#define PFX DRV_MODULE_NAME ": "
32#define DRV_MODULE_VERSION "1.00"
33#define DRV_MODULE_RELDATE "Apr 7, 2006"
34
35#define B44_DEF_MSG_ENABLE \
36 (NETIF_MSG_DRV | \
37 NETIF_MSG_PROBE | \
38 NETIF_MSG_LINK | \
39 NETIF_MSG_TIMER | \
40 NETIF_MSG_IFDOWN | \
41 NETIF_MSG_IFUP | \
42 NETIF_MSG_RX_ERR | \
43 NETIF_MSG_TX_ERR)
44
45/* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
47 */
48#define B44_TX_TIMEOUT (5 * HZ)
49
50/* hardware minimum and maximum for a single frame's data payload */
51#define B44_MIN_MTU 60
52#define B44_MAX_MTU 1500
53
54#define B44_RX_RING_SIZE 512
55#define B44_DEF_RX_RING_PENDING 200
56#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
57 B44_RX_RING_SIZE)
58#define B44_TX_RING_SIZE 512
59#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
61 B44_TX_RING_SIZE)
62#define B44_DMA_MASK 0x3fffffff
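/* Per the workaround comments further down, the 4400 can only do PCI
 * DMA within the low 30 bits (1 GB) of the bus; mappings that land
 * above this mask are redone through GFP_DMA bounce buffers (see
 * b44_alloc_rx_skb() and b44_start_xmit()).
 */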
63
64#define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66#define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
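/* A worked example of the macros above: with tx_pending == 511,
 * tx_cons == 510 and tx_prod == 2 the producer has wrapped, so
 * TX_BUFFS_AVAIL == 510 - 2 - TX_RING_GAP == 507 free descriptors,
 * and NEXT_TX masks with 511 to implement the wrap-around.
 */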
71
72#define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73#define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
74
75/* minimum number of free TX descriptors required to wake up TX process */
76#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
77
78static char version[] __devinitdata =
79 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
80
81MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
82MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
83MODULE_LICENSE("GPL");
84MODULE_VERSION(DRV_MODULE_VERSION);
85
86static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
87module_param(b44_debug, int, 0);
88MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
89
90static struct pci_device_id b44_pci_tbl[] = {
91 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
92 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
93 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
94 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
95 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
96 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
97 { } /* terminate list with empty entry */
98};
99
100MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
101
102static void b44_halt(struct b44 *);
103static void b44_init_rings(struct b44 *);
104static void b44_init_hw(struct b44 *);
105
106static int dma_desc_align_mask;
107static int dma_desc_sync_size;
108
109static const char b44_gstrings[][ETH_GSTRING_LEN] = {
110#define _B44(x...) # x,
111B44_STAT_REG_DECLARE
112#undef _B44
113};
114
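/* The two sync helpers below only matter in the *_RING_HACK case,
 * where a descriptor ring was kmalloc()ed and streaming-mapped instead
 * of coming from pci_alloc_consistent(); every descriptor update must
 * then be synced by hand.  The offset is rounded down with
 * dma_desc_align_mask so that a whole cacheline-sized chunk
 * (dma_desc_sync_size, both computed in b44_init()) is transferred.
 */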
115static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
116 dma_addr_t dma_base,
117 unsigned long offset,
118 enum dma_data_direction dir)
119{
120 dma_sync_single_range_for_device(&pdev->dev, dma_base,
121 offset & dma_desc_align_mask,
122 dma_desc_sync_size, dir);
123}
124
125static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
126 dma_addr_t dma_base,
127 unsigned long offset,
128 enum dma_data_direction dir)
129{
130 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
131 offset & dma_desc_align_mask,
132 dma_desc_sync_size, dir);
133}
134
135static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
136{
137 return readl(bp->regs + reg);
138}
139
140static inline void bw32(const struct b44 *bp,
141 unsigned long reg, unsigned long val)
142{
143 writel(val, bp->regs + reg);
144}
145
146static int b44_wait_bit(struct b44 *bp, unsigned long reg,
147 u32 bit, unsigned long timeout, const int clear)
148{
149 unsigned long i;
150
151 for (i = 0; i < timeout; i++) {
152 u32 val = br32(bp, reg);
153
154 if (clear && !(val & bit))
155 break;
156 if (!clear && (val & bit))
157 break;
158 udelay(10);
159 }
160 if (i == timeout) {
161 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
162 "%lx to %s.\n",
163 bp->dev->name,
164 bit, reg,
165 (clear ? "clear" : "set"));
166 return -ENODEV;
167 }
168 return 0;
169}
170
171/* Sonics SiliconBackplane support routines. ROFL, you should see all the
172 * buzz words used on this company's website :-)
173 *
174 * All of these routines must be invoked with bp->lock held and
175 * interrupts disabled.
176 */
177
178#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
179#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
180
181static u32 ssb_get_core_rev(struct b44 *bp)
182{
183 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
184}
185
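/* Temporarily retargets the BAR0 window at the PCI core (rather than
 * the ethernet core) so that its SBINTVEC and SSB_PCI_TRANS_2
 * registers can be programmed, then restores the original window
 * before returning.
 */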
186static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
187{
188 u32 bar_orig, pci_rev, val;
189
190 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
191 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
192 pci_rev = ssb_get_core_rev(bp);
193
194 val = br32(bp, B44_SBINTVEC);
195 val |= cores;
196 bw32(bp, B44_SBINTVEC, val);
197
198 val = br32(bp, SSB_PCI_TRANS_2);
199 val |= SSB_PCI_PREF | SSB_PCI_BURST;
200 bw32(bp, SSB_PCI_TRANS_2, val);
201
202 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
203
204 return pci_rev;
205}
206
207static void ssb_core_disable(struct b44 *bp)
208{
209 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
210 return;
211
212 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
213 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
214 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
215 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
216 SBTMSLOW_REJECT | SBTMSLOW_RESET));
217 br32(bp, B44_SBTMSLOW);
218 udelay(1);
219 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
220 br32(bp, B44_SBTMSLOW);
221 udelay(1);
222}
223
224static void ssb_core_reset(struct b44 *bp)
225{
226 u32 val;
227
228 ssb_core_disable(bp);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
230 br32(bp, B44_SBTMSLOW);
231 udelay(1);
232
233 /* Clear SERR if set, this is a hw bug workaround. */
234 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
235 bw32(bp, B44_SBTMSHIGH, 0);
236
237 val = br32(bp, B44_SBIMSTATE);
238 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
239 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
240
241 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
242 br32(bp, B44_SBTMSLOW);
243 udelay(1);
244
245 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
246 br32(bp, B44_SBTMSLOW);
247 udelay(1);
248}
249
250static int ssb_core_unit(struct b44 *bp)
251{
252#if 0
253 u32 val = br32(bp, B44_SBADMATCH0);
254 u32 base, type;
255
256 type = val & SBADMATCH0_TYPE_MASK;
257 switch (type) {
258 case 0:
259 base = val & SBADMATCH0_BS0_MASK;
260 break;
261
262 case 1:
263 base = val & SBADMATCH0_BS1_MASK;
264 break;
265
266 case 2:
267 default:
268 base = val & SBADMATCH0_BS2_MASK;
269 break;
270 };
271#endif
272 return 0;
273}
274
275static int ssb_is_core_up(struct b44 *bp)
276{
277 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
278 == SBTMSLOW_CLOCK);
279}
280
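/* The CAM spreads a 6-byte MAC address across two registers: bytes 2-5
 * go into CAM_DATA_LO, bytes 0-1 plus a valid bit into CAM_DATA_HI,
 * and the entry is committed by a CAM_CTRL write carrying the index.
 */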
281static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
282{
283 u32 val;
284
285 val = ((u32) data[2]) << 24;
286 val |= ((u32) data[3]) << 16;
287 val |= ((u32) data[4]) << 8;
288 val |= ((u32) data[5]) << 0;
289 bw32(bp, B44_CAM_DATA_LO, val);
290 val = (CAM_DATA_HI_VALID |
291 (((u32) data[0]) << 8) |
292 (((u32) data[1]) << 0));
293 bw32(bp, B44_CAM_DATA_HI, val);
294 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
295 (index << CAM_CTRL_INDEX_SHIFT)));
296 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
297}
298
299static inline void __b44_disable_ints(struct b44 *bp)
300{
301 bw32(bp, B44_IMASK, 0);
302}
303
304static void b44_disable_ints(struct b44 *bp)
305{
306 __b44_disable_ints(bp);
307
308 /* Flush posted writes. */
309 br32(bp, B44_IMASK);
310}
311
312static void b44_enable_ints(struct b44 *bp)
313{
314 bw32(bp, B44_IMASK, bp->imask);
315}
316
317static int b44_readphy(struct b44 *bp, int reg, u32 *val)
318{
319 int err;
320
321 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
322 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
323 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
324 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
325 (reg << MDIO_DATA_RA_SHIFT) |
326 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
327 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
328 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
329
330 return err;
331}
332
333static int b44_writephy(struct b44 *bp, int reg, u32 val)
334{
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
341 (val & MDIO_DATA_DATA)));
342 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
343}
344
345/* miilib interface */
346/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
347 * due to code existing before miilib use was added to this driver.
348 * Someone should remove this artificial driver limitation in
349 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
350 */
351static int b44_mii_read(struct net_device *dev, int phy_id, int location)
352{
353 u32 val;
354 struct b44 *bp = netdev_priv(dev);
355 int rc = b44_readphy(bp, location, &val);
356 if (rc)
357 return 0xffffffff;
358 return val;
359}
360
361static void b44_mii_write(struct net_device *dev, int phy_id, int location,
362 int val)
363{
364 struct b44 *bp = netdev_priv(dev);
365 b44_writephy(bp, location, val);
366}
367
368static int b44_phy_reset(struct b44 *bp)
369{
370 u32 val;
371 int err;
372
373 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
374 if (err)
375 return err;
376 udelay(100);
377 err = b44_readphy(bp, MII_BMCR, &val);
378 if (!err) {
379 if (val & BMCR_RESET) {
380 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
381 bp->dev->name);
382 err = -ENODEV;
383 }
384 }
385
386 return err;
387}
388
389static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
390{
391 u32 val;
392
393 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
394 bp->flags |= pause_flags;
395
396 val = br32(bp, B44_RXCONFIG);
397 if (pause_flags & B44_FLAG_RX_PAUSE)
398 val |= RXCONFIG_FLOW;
399 else
400 val &= ~RXCONFIG_FLOW;
401 bw32(bp, B44_RXCONFIG, val);
402
403 val = br32(bp, B44_MAC_FLOW);
404 if (pause_flags & B44_FLAG_TX_PAUSE)
405 val |= (MAC_FLOW_PAUSE_ENAB |
406 (0xc0 & MAC_FLOW_RX_HI_WATER));
407 else
408 val &= ~MAC_FLOW_PAUSE_ENAB;
409 bw32(bp, B44_MAC_FLOW, val);
410}
411
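/* Of the possible autonegotiation outcomes, only one enables anything
 * here: we advertised symmetric and asymmetric pause and the partner
 * advertised asymmetric pause only, meaning the partner may transmit
 * PAUSE frames and we honour them, so just rx flow control is enabled
 * (consistent with the rx-pause-only default policy noted below).
 */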
412static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
413{
414 u32 pause_enab = 0;
415
416 /* The driver supports only rx pause by default because
417 the b44 mac tx pause mechanism generates excessive
418 pause frames.
419 Use ethtool to turn on b44 tx pause if necessary.
420 */
421 if ((local & ADVERTISE_PAUSE_CAP) &&
422 (local & ADVERTISE_PAUSE_ASYM)){
423 if ((remote & LPA_PAUSE_ASYM) &&
424 !(remote & LPA_PAUSE_CAP))
425 pause_enab |= B44_FLAG_RX_PAUSE;
426 }
427
428 __b44_set_flow_ctrl(bp, pause_enab);
429}
430
431static int b44_setup_phy(struct b44 *bp)
432{
433 u32 val;
434 int err;
435
436 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
437 goto out;
438 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
439 val & MII_ALEDCTRL_ALLMSK)) != 0)
440 goto out;
441 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
442 goto out;
443 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
444 val | MII_TLEDCTRL_ENABLE)) != 0)
445 goto out;
446
447 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
448 u32 adv = ADVERTISE_CSMA;
449
450 if (bp->flags & B44_FLAG_ADV_10HALF)
451 adv |= ADVERTISE_10HALF;
452 if (bp->flags & B44_FLAG_ADV_10FULL)
453 adv |= ADVERTISE_10FULL;
454 if (bp->flags & B44_FLAG_ADV_100HALF)
455 adv |= ADVERTISE_100HALF;
456 if (bp->flags & B44_FLAG_ADV_100FULL)
457 adv |= ADVERTISE_100FULL;
458
459 if (bp->flags & B44_FLAG_PAUSE_AUTO)
460 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
461
462 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
463 goto out;
464 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
465 BMCR_ANRESTART))) != 0)
466 goto out;
467 } else {
468 u32 bmcr;
469
470 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
471 goto out;
472 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
473 if (bp->flags & B44_FLAG_100_BASE_T)
474 bmcr |= BMCR_SPEED100;
475 if (bp->flags & B44_FLAG_FULL_DUPLEX)
476 bmcr |= BMCR_FULLDPLX;
477 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
478 goto out;
479
480 /* Since we will not be negotiating there is no safe way
481 * to determine if the link partner supports flow control
482 * or not. So just disable it completely in this case.
483 */
484 b44_set_flow_ctrl(bp, 0, 0);
485 }
486
487out:
488 return err;
489}
490
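/* The pointer walk below assumes struct b44_hw_stats lays out its u32
 * counters in exactly the order of the MIB registers, B44_TX_GOOD_O
 * through B44_TX_PAUSE and then B44_RX_GOOD_O through B44_RX_NPAUSE,
 * so a single incrementing pointer can accumulate every counter.
 */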
491static void b44_stats_update(struct b44 *bp)
492{
493 unsigned long reg;
494 u32 *val;
495
496 val = &bp->hw_stats.tx_good_octets;
497 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
498 *val++ += br32(bp, reg);
499 }
500
501 /* Pad */
502 reg += 8*4UL;
503
504 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
505 *val++ += br32(bp, reg);
506 }
507}
508
509static void b44_link_report(struct b44 *bp)
510{
511 if (!netif_carrier_ok(bp->dev)) {
512 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
513 } else {
514 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
515 bp->dev->name,
516 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
517 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
518
519 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
520 "%s for RX.\n",
521 bp->dev->name,
522 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
523 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
524 }
525}
526
527static void b44_check_phy(struct b44 *bp)
528{
529 u32 bmsr, aux;
530
531 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
532 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
533 (bmsr != 0xffff)) {
534 if (aux & MII_AUXCTRL_SPEED)
535 bp->flags |= B44_FLAG_100_BASE_T;
536 else
537 bp->flags &= ~B44_FLAG_100_BASE_T;
538 if (aux & MII_AUXCTRL_DUPLEX)
539 bp->flags |= B44_FLAG_FULL_DUPLEX;
540 else
541 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
542
543 if (!netif_carrier_ok(bp->dev) &&
544 (bmsr & BMSR_LSTATUS)) {
545 u32 val = br32(bp, B44_TX_CTRL);
546 u32 local_adv, remote_adv;
547
548 if (bp->flags & B44_FLAG_FULL_DUPLEX)
549 val |= TX_CTRL_DUPLEX;
550 else
551 val &= ~TX_CTRL_DUPLEX;
552 bw32(bp, B44_TX_CTRL, val);
553
554 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
555 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
556 !b44_readphy(bp, MII_LPA, &remote_adv))
557 b44_set_flow_ctrl(bp, local_adv, remote_adv);
558
559 /* Link now up */
560 netif_carrier_on(bp->dev);
561 b44_link_report(bp);
562 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
563 /* Link now down */
564 netif_carrier_off(bp->dev);
565 b44_link_report(bp);
566 }
567
568 if (bmsr & BMSR_RFAULT)
569 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
570 bp->dev->name);
571 if (bmsr & BMSR_JCD)
572 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
573 bp->dev->name);
574 }
575}
576
577static void b44_timer(unsigned long __opaque)
578{
579 struct b44 *bp = (struct b44 *) __opaque;
580
581 spin_lock_irq(&bp->lock);
582
583 b44_check_phy(bp);
584
585 b44_stats_update(bp);
586
587 spin_unlock_irq(&bp->lock);
588
589 bp->timer.expires = jiffies + HZ;
590 add_timer(&bp->timer);
591}
592
593static void b44_tx(struct b44 *bp)
594{
595 u32 cur, cons;
596
597 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598 cur /= sizeof(struct dma_desc);
599
600 /* XXX needs updating when NETIF_F_SG is supported */
601 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602 struct ring_info *rp = &bp->tx_buffers[cons];
603 struct sk_buff *skb = rp->skb;
604
605 BUG_ON(skb == NULL);
606
607 pci_unmap_single(bp->pdev,
608 pci_unmap_addr(rp, mapping),
609 skb->len,
610 PCI_DMA_TODEVICE);
611 rp->skb = NULL;
612 dev_kfree_skb_irq(skb);
613 }
614
615 bp->tx_cons = cons;
616 if (netif_queue_stopped(bp->dev) &&
617 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
618 netif_wake_queue(bp->dev);
619
620 bw32(bp, B44_GPTIMER, 0);
621}
622
623/* Works like this. This chip writes a "struct rx_header" 30 bytes
624 * before the DMA address you give it. So we allocate 30 more bytes
625 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
626 * point the chip at 30 bytes past where the rx_header will go.
627 */
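/* The resulting layout: the bp->rx_offset (30) bytes cover the
 * rx_header the chip prepends plus padding, chosen so that the 14-byte
 * ethernet header that follows lands on a 2-byte boundary (see the
 * comment in b44_get_invariants()):
 *
 *   skb->data               skb->data + bp->rx_offset
 *      |<- rx_header, pad ->|<- received frame ... ->|
 */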
628static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
629{
630 struct dma_desc *dp;
631 struct ring_info *src_map, *map;
632 struct rx_header *rh;
633 struct sk_buff *skb;
634 dma_addr_t mapping;
635 int dest_idx;
636 u32 ctrl;
637
638 src_map = NULL;
639 if (src_idx >= 0)
640 src_map = &bp->rx_buffers[src_idx];
641 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
642 map = &bp->rx_buffers[dest_idx];
643 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
644 if (skb == NULL)
645 return -ENOMEM;
646
647 mapping = pci_map_single(bp->pdev, skb->data,
648 RX_PKT_BUF_SZ,
649 PCI_DMA_FROMDEVICE);
650
651 /* Hardware bug work-around, the chip is unable to do PCI DMA
652 to/from anything above 1GB :-( */
653 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
654 /* Sigh... */
655 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
656 dev_kfree_skb_any(skb);
657 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
658 if (skb == NULL)
659 return -ENOMEM;
660 mapping = pci_map_single(bp->pdev, skb->data,
661 RX_PKT_BUF_SZ,
662 PCI_DMA_FROMDEVICE);
663 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
664 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
665 dev_kfree_skb_any(skb);
666 return -ENOMEM;
667 }
668 }
669
670 skb->dev = bp->dev;
671 skb_reserve(skb, bp->rx_offset);
672
673 rh = (struct rx_header *)
674 (skb->data - bp->rx_offset);
675 rh->len = 0;
676 rh->flags = 0;
677
678 map->skb = skb;
679 pci_unmap_addr_set(map, mapping, mapping);
680
681 if (src_map != NULL)
682 src_map->skb = NULL;
683
684 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
685 if (dest_idx == (B44_RX_RING_SIZE - 1))
686 ctrl |= DESC_CTRL_EOT;
687
688 dp = &bp->rx_ring[dest_idx];
689 dp->ctrl = cpu_to_le32(ctrl);
690 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
691
692 if (bp->flags & B44_FLAG_RX_RING_HACK)
693 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
694 dest_idx * sizeof(*dp),
695 DMA_BIDIRECTIONAL);
696
697 return RX_PKT_BUF_SZ;
698}
699
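/* Used by b44_rx() on the copy path (len <= RX_COPY_THRESHOLD) and on
 * receive errors: instead of handing the buffer upstream, its skb and
 * DMA mapping are moved from the consumer slot to the producer slot,
 * reusing the allocation without a fresh dev_alloc_skb().
 */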
700static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
701{
702 struct dma_desc *src_desc, *dest_desc;
703 struct ring_info *src_map, *dest_map;
704 struct rx_header *rh;
705 int dest_idx;
706 u32 ctrl;
707
708 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
709 dest_desc = &bp->rx_ring[dest_idx];
710 dest_map = &bp->rx_buffers[dest_idx];
711 src_desc = &bp->rx_ring[src_idx];
712 src_map = &bp->rx_buffers[src_idx];
713
714 dest_map->skb = src_map->skb;
715 rh = (struct rx_header *) src_map->skb->data;
716 rh->len = 0;
717 rh->flags = 0;
718 pci_unmap_addr_set(dest_map, mapping,
719 pci_unmap_addr(src_map, mapping));
720
721 if (bp->flags & B44_FLAG_RX_RING_HACK)
722 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
723 src_idx * sizeof(*src_desc),
724 DMA_BIDIRECTIONAL);
725
726 ctrl = src_desc->ctrl;
727 if (dest_idx == (B44_RX_RING_SIZE - 1))
728 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
729 else
730 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
731
732 dest_desc->ctrl = ctrl;
733 dest_desc->addr = src_desc->addr;
734
735 src_map->skb = NULL;
736
737 if (bp->flags & B44_FLAG_RX_RING_HACK)
738 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
739 dest_idx * sizeof(*dest_desc),
740 DMA_BIDIRECTIONAL);
741
742 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
743 RX_PKT_BUF_SZ,
744 PCI_DMA_FROMDEVICE);
745}
746
747static int b44_rx(struct b44 *bp, int budget)
748{
749 int received;
750 u32 cons, prod;
751
752 received = 0;
753 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
754 prod /= sizeof(struct dma_desc);
755 cons = bp->rx_cons;
756
757 while (cons != prod && budget > 0) {
758 struct ring_info *rp = &bp->rx_buffers[cons];
759 struct sk_buff *skb = rp->skb;
760 dma_addr_t map = pci_unmap_addr(rp, mapping);
761 struct rx_header *rh;
762 u16 len;
763
764 pci_dma_sync_single_for_cpu(bp->pdev, map,
765 RX_PKT_BUF_SZ,
766 PCI_DMA_FROMDEVICE);
767 rh = (struct rx_header *) skb->data;
768 len = le16_to_cpu(rh->len);
769 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
770 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
771 drop_it:
772 b44_recycle_rx(bp, cons, bp->rx_prod);
773 drop_it_no_recycle:
774 bp->stats.rx_dropped++;
775 goto next_pkt;
776 }
777
778 if (len == 0) {
779 int i = 0;
780
781 do {
782 udelay(2);
783 barrier();
784 len = le16_to_cpu(rh->len);
785 } while (len == 0 && i++ < 5);
786 if (len == 0)
787 goto drop_it;
788 }
789
790 /* Omit CRC. */
791 len -= 4;
792
793 if (len > RX_COPY_THRESHOLD) {
794 int skb_size;
795 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
796 if (skb_size < 0)
797 goto drop_it;
798 pci_unmap_single(bp->pdev, map,
799 skb_size, PCI_DMA_FROMDEVICE);
800 /* Leave out rx_header */
801 skb_put(skb, len+bp->rx_offset);
802 skb_pull(skb,bp->rx_offset);
803 } else {
804 struct sk_buff *copy_skb;
805
806 b44_recycle_rx(bp, cons, bp->rx_prod);
807 copy_skb = dev_alloc_skb(len + 2);
808 if (copy_skb == NULL)
809 goto drop_it_no_recycle;
810
811 copy_skb->dev = bp->dev;
812 skb_reserve(copy_skb, 2);
813 skb_put(copy_skb, len);
814 /* DMA sync done above, copy just the actual packet */
815 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
816
817 skb = copy_skb;
818 }
819 skb->ip_summed = CHECKSUM_NONE;
820 skb->protocol = eth_type_trans(skb, bp->dev);
821 netif_receive_skb(skb);
822 bp->dev->last_rx = jiffies;
823 received++;
824 budget--;
825 next_pkt:
826 bp->rx_prod = (bp->rx_prod + 1) &
827 (B44_RX_RING_SIZE - 1);
828 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
829 }
830
831 bp->rx_cons = cons;
832 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
833
834 return received;
835}
836
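/* Pre-napi_struct polling: *budget is the global packet allowance and
 * netdev->quota the per-device one.  Returning 0 means all work is
 * done and interrupts have been re-enabled; returning 1 asks the core
 * to call b44_poll() again.
 */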
837static int b44_poll(struct net_device *netdev, int *budget)
838{
839 struct b44 *bp = netdev_priv(netdev);
840 int done;
841
842 spin_lock_irq(&bp->lock);
843
844 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
845 /* spin_lock(&bp->tx_lock); */
846 b44_tx(bp);
847 /* spin_unlock(&bp->tx_lock); */
848 }
849 spin_unlock_irq(&bp->lock);
850
851 done = 1;
852 if (bp->istat & ISTAT_RX) {
853 int orig_budget = *budget;
854 int work_done;
855
856 if (orig_budget > netdev->quota)
857 orig_budget = netdev->quota;
858
859 work_done = b44_rx(bp, orig_budget);
860
861 *budget -= work_done;
862 netdev->quota -= work_done;
863
864 if (work_done >= orig_budget)
865 done = 0;
866 }
867
868 if (bp->istat & ISTAT_ERRORS) {
869 spin_lock_irq(&bp->lock);
870 b44_halt(bp);
871 b44_init_rings(bp);
872 b44_init_hw(bp);
873 netif_wake_queue(bp->dev);
874 spin_unlock_irq(&bp->lock);
875 done = 1;
876 }
877
878 if (done) {
879 netif_rx_complete(netdev);
880 b44_enable_ints(bp);
881 }
882
883 return (done ? 0 : 1);
884}
885
886static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
887{
888 struct net_device *dev = dev_id;
889 struct b44 *bp = netdev_priv(dev);
890 u32 istat, imask;
891 int handled = 0;
892
893 spin_lock(&bp->lock);
894
895 istat = br32(bp, B44_ISTAT);
896 imask = br32(bp, B44_IMASK);
897
898 /* ??? What the fuck is the purpose of the interrupt mask
899 * ??? register if we have to mask it out by hand anyways?
900 */
901 istat &= imask;
902 if (istat) {
903 handled = 1;
904
905 if (unlikely(!netif_running(dev))) {
906 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
907 goto irq_ack;
908 }
909
910 if (netif_rx_schedule_prep(dev)) {
911 /* NOTE: These writes are posted by the readback of
912 * the ISTAT register below.
913 */
914 bp->istat = istat;
915 __b44_disable_ints(bp);
916 __netif_rx_schedule(dev);
917 } else {
918 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
919 dev->name);
920 }
921
922irq_ack:
923 bw32(bp, B44_ISTAT, istat);
924 br32(bp, B44_ISTAT);
925 }
926 spin_unlock(&bp->lock);
927 return IRQ_RETVAL(handled);
928}
929
930static void b44_tx_timeout(struct net_device *dev)
931{
932 struct b44 *bp = netdev_priv(dev);
933
934 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
935 dev->name);
936
937 spin_lock_irq(&bp->lock);
938
939 b44_halt(bp);
940 b44_init_rings(bp);
941 b44_init_hw(bp);
942
943 spin_unlock_irq(&bp->lock);
944
945 b44_enable_ints(bp);
946
947 netif_wake_queue(dev);
948}
949
950static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
951{
952 struct b44 *bp = netdev_priv(dev);
953 struct sk_buff *bounce_skb;
954 int rc = NETDEV_TX_OK;
955 dma_addr_t mapping;
956 u32 len, entry, ctrl;
957
958 len = skb->len;
959 spin_lock_irq(&bp->lock);
960
961 /* This is a hard error, log it. */
962 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
963 netif_stop_queue(dev);
964 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
965 dev->name);
966 goto err_out;
967 }
968
969 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
970 if (mapping + len > B44_DMA_MASK) {
971 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
972 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
973
974 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
975 GFP_ATOMIC|GFP_DMA);
976 if (!bounce_skb)
977 goto err_out;
978
979 mapping = pci_map_single(bp->pdev, bounce_skb->data,
980 len, PCI_DMA_TODEVICE);
981 if (mapping + len > B44_DMA_MASK) {
982 pci_unmap_single(bp->pdev, mapping,
983 len, PCI_DMA_TODEVICE);
984 dev_kfree_skb_any(bounce_skb);
985 goto err_out;
986 }
987
988 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
989 dev_kfree_skb_any(skb);
990 skb = bounce_skb;
991 }
992
993 entry = bp->tx_prod;
994 bp->tx_buffers[entry].skb = skb;
995 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
996
997 ctrl = (len & DESC_CTRL_LEN);
998 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
999 if (entry == (B44_TX_RING_SIZE - 1))
1000 ctrl |= DESC_CTRL_EOT;
1001
1002 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1003 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1004
1005 if (bp->flags & B44_FLAG_TX_RING_HACK)
1006 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1007 entry * sizeof(bp->tx_ring[0]),
1008 DMA_TO_DEVICE);
1009
1010 entry = NEXT_TX(entry);
1011
1012 bp->tx_prod = entry;
1013
1014 wmb();
1015
1016 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1017 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1018 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1019 if (bp->flags & B44_FLAG_REORDER_BUG)
1020 br32(bp, B44_DMATX_PTR);
1021
1022 if (TX_BUFFS_AVAIL(bp) < 1)
1023 netif_stop_queue(dev);
1024
1025 dev->trans_start = jiffies;
1026
1027out_unlock:
1028 spin_unlock_irq(&bp->lock);
1029
1030 return rc;
1031
1032err_out:
1033 rc = NETDEV_TX_BUSY;
1034 goto out_unlock;
1035}
1036
1037static int b44_change_mtu(struct net_device *dev, int new_mtu)
1038{
1039 struct b44 *bp = netdev_priv(dev);
1040
1041 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1042 return -EINVAL;
1043
1044 if (!netif_running(dev)) {
1045 /* We'll just catch it later when the
1046 * device is up'd.
1047 */
1048 dev->mtu = new_mtu;
1049 return 0;
1050 }
1051
1052 spin_lock_irq(&bp->lock);
1053 b44_halt(bp);
1054 dev->mtu = new_mtu;
1055 b44_init_rings(bp);
1056 b44_init_hw(bp);
1057 spin_unlock_irq(&bp->lock);
1058
1059 b44_enable_ints(bp);
1060
1061 return 0;
1062}
1063
1064/* Free up pending packets in all rx/tx rings.
1065 *
1066 * The chip has been shut down and the driver detached from
1067 * the networking, so no interrupts or new tx packets will
1068 * end up in the driver. bp->lock is not held and we are not
1069 * in an interrupt context and thus may sleep.
1070 */
1071static void b44_free_rings(struct b44 *bp)
1072{
1073 struct ring_info *rp;
1074 int i;
1075
1076 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1077 rp = &bp->rx_buffers[i];
1078
1079 if (rp->skb == NULL)
1080 continue;
1081 pci_unmap_single(bp->pdev,
1082 pci_unmap_addr(rp, mapping),
1083 RX_PKT_BUF_SZ,
1084 PCI_DMA_FROMDEVICE);
1085 dev_kfree_skb_any(rp->skb);
1086 rp->skb = NULL;
1087 }
1088
1089 /* XXX needs changes once NETIF_F_SG is set... */
1090 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1091 rp = &bp->tx_buffers[i];
1092
1093 if (rp->skb == NULL)
1094 continue;
1095 pci_unmap_single(bp->pdev,
1096 pci_unmap_addr(rp, mapping),
1097 rp->skb->len,
1098 PCI_DMA_TODEVICE);
1099 dev_kfree_skb_any(rp->skb);
1100 rp->skb = NULL;
1101 }
1102}
1103
1104/* Initialize tx/rx rings for packet processing.
1105 *
1106 * The chip has been shut down and the driver detached from
1107 * the networking, so no interrupts or new tx packets will
1108 * end up in the driver.
1109 */
1110static void b44_init_rings(struct b44 *bp)
1111{
1112 int i;
1113
1114 b44_free_rings(bp);
1115
1116 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1117 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1118
1119 if (bp->flags & B44_FLAG_RX_RING_HACK)
1120 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1121 DMA_TABLE_BYTES,
1122 PCI_DMA_BIDIRECTIONAL);
1123
1124 if (bp->flags & B44_FLAG_TX_RING_HACK)
1125 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1126 DMA_TABLE_BYTES,
1127 PCI_DMA_TODEVICE);
1128
1129 for (i = 0; i < bp->rx_pending; i++) {
1130 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1131 break;
1132 }
1133}
1134
1135/*
1136 * Must not be invoked with interrupt sources disabled and
1137 * the hardware shut down.
1138 */
1139static void b44_free_consistent(struct b44 *bp)
1140{
1141 kfree(bp->rx_buffers);
1142 bp->rx_buffers = NULL;
1143 kfree(bp->tx_buffers);
1144 bp->tx_buffers = NULL;
1145 if (bp->rx_ring) {
1146 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1147 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1148 DMA_TABLE_BYTES,
1149 DMA_BIDIRECTIONAL);
1150 kfree(bp->rx_ring);
1151 } else
1152 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1153 bp->rx_ring, bp->rx_ring_dma);
1154 bp->rx_ring = NULL;
1155 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1156 }
1157 if (bp->tx_ring) {
1158 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1159 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1160 DMA_TABLE_BYTES,
1161 DMA_TO_DEVICE);
1162 kfree(bp->tx_ring);
1163 } else
1164 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1165 bp->tx_ring, bp->tx_ring_dma);
1166 bp->tx_ring = NULL;
1167 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1168 }
1169}
1170
1171/*
1172 * Must not be invoked with interrupt sources disabled and
1173 * the hardware shut down. Can sleep.
1174 */
1175static int b44_alloc_consistent(struct b44 *bp)
1176{
1177 int size;
1178
1179 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1180 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1181 if (!bp->rx_buffers)
1182 goto out_err;
1183
1184 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1185 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1186 if (!bp->tx_buffers)
1187 goto out_err;
1188
1189 size = DMA_TABLE_BYTES;
1190 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1191 if (!bp->rx_ring) {
1192 /* Allocation may have failed due to pci_alloc_consistent
1193 insisting on use of GFP_DMA, which is more restrictive
1194 than necessary... */
1195 struct dma_desc *rx_ring;
1196 dma_addr_t rx_ring_dma;
1197
1198 rx_ring = kzalloc(size, GFP_KERNEL);
1199 if (!rx_ring)
1200 goto out_err;
1201
1202 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1203 DMA_TABLE_BYTES,
1204 DMA_BIDIRECTIONAL);
1205
1206 if (rx_ring_dma + size > B44_DMA_MASK) {
1207 kfree(rx_ring);
1208 goto out_err;
1209 }
1210
1211 bp->rx_ring = rx_ring;
1212 bp->rx_ring_dma = rx_ring_dma;
1213 bp->flags |= B44_FLAG_RX_RING_HACK;
1214 }
1215
1216 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1217 if (!bp->tx_ring) {
1218 /* Allocation may have failed due to pci_alloc_consistent
1219 insisting on use of GFP_DMA, which is more restrictive
1220 than necessary... */
1221 struct dma_desc *tx_ring;
1222 dma_addr_t tx_ring_dma;
1223
1224 tx_ring = kzalloc(size, GFP_KERNEL);
1225 if (!tx_ring)
1226 goto out_err;
1227
1228 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1229 DMA_TABLE_BYTES,
1230 DMA_TO_DEVICE);
1231
1232 if (tx_ring_dma + size > B44_DMA_MASK) {
1233 kfree(tx_ring);
1234 goto out_err;
1235 }
1236
1237 bp->tx_ring = tx_ring;
1238 bp->tx_ring_dma = tx_ring_dma;
1239 bp->flags |= B44_FLAG_TX_RING_HACK;
1240 }
1241
1242 return 0;
1243
1244out_err:
1245 b44_free_consistent(bp);
1246 return -ENOMEM;
1247}
1248
1249/* bp->lock is held. */
1250static void b44_clear_stats(struct b44 *bp)
1251{
1252 unsigned long reg;
1253
1254 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1255 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1256 br32(bp, reg);
1257 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1258 br32(bp, reg);
1259}
1260
1261/* bp->lock is held. */
1262static void b44_chip_reset(struct b44 *bp)
1263{
1264 if (ssb_is_core_up(bp)) {
1265 bw32(bp, B44_RCV_LAZY, 0);
1266 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1267 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1268 bw32(bp, B44_DMATX_CTRL, 0);
1269 bp->tx_prod = bp->tx_cons = 0;
1270 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1271 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1272 100, 0);
1273 }
1274 bw32(bp, B44_DMARX_CTRL, 0);
1275 bp->rx_prod = bp->rx_cons = 0;
1276 } else {
1277 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1278 SBINTVEC_ENET0 :
1279 SBINTVEC_ENET1));
1280 }
1281
1282 ssb_core_reset(bp);
1283
1284 b44_clear_stats(bp);
1285
1286 /* Make PHY accessible. */
1287 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1288 (0x0d & MDIO_CTRL_MAXF_MASK)));
1289 br32(bp, B44_MDIO_CTRL);
1290
1291 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1292 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1293 br32(bp, B44_ENET_CTRL);
1294 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1295 } else {
1296 u32 val = br32(bp, B44_DEVCTRL);
1297
1298 if (val & DEVCTRL_EPR) {
1299 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1300 br32(bp, B44_DEVCTRL);
1301 udelay(100);
1302 }
1303 bp->flags |= B44_FLAG_INTERNAL_PHY;
1304 }
1305}
1306
1307/* bp->lock is held. */
1308static void b44_halt(struct b44 *bp)
1309{
1310 b44_disable_ints(bp);
1311 b44_chip_reset(bp);
1312}
1313
1314/* bp->lock is held. */
1315static void __b44_set_mac_addr(struct b44 *bp)
1316{
1317 bw32(bp, B44_CAM_CTRL, 0);
1318 if (!(bp->dev->flags & IFF_PROMISC)) {
1319 u32 val;
1320
1321 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1322 val = br32(bp, B44_CAM_CTRL);
1323 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1324 }
1325}
1326
1327static int b44_set_mac_addr(struct net_device *dev, void *p)
1328{
1329 struct b44 *bp = netdev_priv(dev);
1330 struct sockaddr *addr = p;
1331
1332 if (netif_running(dev))
1333 return -EBUSY;
1334
1335 if (!is_valid_ether_addr(addr->sa_data))
1336 return -EINVAL;
1337
1338 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1339
1340 spin_lock_irq(&bp->lock);
1341 __b44_set_mac_addr(bp);
1342 spin_unlock_irq(&bp->lock);
1343
1344 return 0;
1345}
1346
1347/* Called at device open time to get the chip ready for
1348 * packet processing. Invoked with bp->lock held.
1349 */
1350static void __b44_set_rx_mode(struct net_device *);
1351static void b44_init_hw(struct b44 *bp)
1352{
1353 u32 val;
1354
1355 b44_chip_reset(bp);
1356 b44_phy_reset(bp);
1357 b44_setup_phy(bp);
1358
1359 /* Enable CRC32, set proper LED modes and power on PHY */
1360 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1361 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1362
1363 /* This sets the MAC address too. */
1364 __b44_set_rx_mode(bp->dev);
1365
1366 /* MTU + eth header + possible VLAN tag + struct rx_header */
1367 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1368 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1369
1370 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1371 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1372 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1373 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1374 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1375 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1376
1377 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1378 bp->rx_prod = bp->rx_pending;
1379
1380 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1381
1382 val = br32(bp, B44_ENET_CTRL);
1383 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1384}
1385
1386static int b44_open(struct net_device *dev)
1387{
1388 struct b44 *bp = netdev_priv(dev);
1389 int err;
1390
1391 err = b44_alloc_consistent(bp);
1392 if (err)
1393 goto out;
1394
1395 b44_init_rings(bp);
1396 b44_init_hw(bp);
1397
1398 b44_check_phy(bp);
1399
1400 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1401 if (unlikely(err < 0)) {
1402 b44_chip_reset(bp);
1403 b44_free_rings(bp);
1404 b44_free_consistent(bp);
1405 goto out;
1406 }
1407
1408 init_timer(&bp->timer);
1409 bp->timer.expires = jiffies + HZ;
1410 bp->timer.data = (unsigned long) bp;
1411 bp->timer.function = b44_timer;
1412 add_timer(&bp->timer);
1413
1414 b44_enable_ints(bp);
1415 netif_start_queue(dev);
1416out:
1417 return err;
1418}
1419
1420#if 0
1421/*static*/ void b44_dump_state(struct b44 *bp)
1422{
1423 u32 val32, val32_2, val32_3, val32_4, val32_5;
1424 u16 val16;
1425
1426 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1427 printk("DEBUG: PCI status [%04x] \n", val16);
1428
1429}
1430#endif
1431
1432#ifdef CONFIG_NET_POLL_CONTROLLER
1433/*
1434 * Polling receive - used by netconsole and other diagnostic tools
1435 * to allow network i/o with interrupts disabled.
1436 */
1437static void b44_poll_controller(struct net_device *dev)
1438{
1439 disable_irq(dev->irq);
1440 b44_interrupt(dev->irq, dev, NULL);
1441 enable_irq(dev->irq);
1442}
1443#endif
1444
1445static int b44_close(struct net_device *dev)
1446{
1447 struct b44 *bp = netdev_priv(dev);
1448
1449 netif_stop_queue(dev);
1450
1451 netif_poll_disable(dev);
1452
1453 del_timer_sync(&bp->timer);
1454
1455 spin_lock_irq(&bp->lock);
1456
1457#if 0
1458 b44_dump_state(bp);
1459#endif
1460 b44_halt(bp);
1461 b44_free_rings(bp);
1462 netif_carrier_off(dev);
1463
1464 spin_unlock_irq(&bp->lock);
1465
1466 free_irq(dev->irq, dev);
1467
1468 netif_poll_enable(dev);
1469
1470 b44_free_consistent(bp);
1471
1472 return 0;
1473}
1474
1475static struct net_device_stats *b44_get_stats(struct net_device *dev)
1476{
1477 struct b44 *bp = netdev_priv(dev);
1478 struct net_device_stats *nstat = &bp->stats;
1479 struct b44_hw_stats *hwstat = &bp->hw_stats;
1480
1481 /* Convert HW stats into netdevice stats. */
1482 nstat->rx_packets = hwstat->rx_pkts;
1483 nstat->tx_packets = hwstat->tx_pkts;
1484 nstat->rx_bytes = hwstat->rx_octets;
1485 nstat->tx_bytes = hwstat->tx_octets;
1486 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1487 hwstat->tx_oversize_pkts +
1488 hwstat->tx_underruns +
1489 hwstat->tx_excessive_cols +
1490 hwstat->tx_late_cols);
1491 nstat->multicast = hwstat->rx_multicast_pkts;
1492 nstat->collisions = hwstat->tx_total_cols;
1493
1494 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1495 hwstat->rx_undersize);
1496 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1497 nstat->rx_frame_errors = hwstat->rx_align_errs;
1498 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1499 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1500 hwstat->rx_oversize_pkts +
1501 hwstat->rx_missed_pkts +
1502 hwstat->rx_crc_align_errs +
1503 hwstat->rx_undersize +
1504 hwstat->rx_crc_errs +
1505 hwstat->rx_align_errs +
1506 hwstat->rx_symbol_errs);
1507
1508 nstat->tx_aborted_errors = hwstat->tx_underruns;
1509#if 0
1510 /* Carrier lost counter seems to be broken for some devices */
1511 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1512#endif
1513
1514 return nstat;
1515}
1516
1517static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1518{
1519 struct dev_mc_list *mclist;
1520 int i, num_ents;
1521
1522 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1523 mclist = dev->mc_list;
1524 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1525 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1526 }
1527 return i+1;
1528}
1529
1530static void __b44_set_rx_mode(struct net_device *dev)
1531{
1532 struct b44 *bp = netdev_priv(dev);
1533 u32 val;
1534
1535 val = br32(bp, B44_RXCONFIG);
1536 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1537 if (dev->flags & IFF_PROMISC) {
1538 val |= RXCONFIG_PROMISC;
1539 bw32(bp, B44_RXCONFIG, val);
1540 } else {
1541 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1542 int i = 0;
1543
1544 __b44_set_mac_addr(bp);
1545
1546 if (dev->flags & IFF_ALLMULTI)
1547 val |= RXCONFIG_ALLMULTI;
1548 else
1549 i = __b44_load_mcast(bp, dev);
1550
1551 for (; i < 64; i++) {
1552 __b44_cam_write(bp, zero, i);
1553 }
1554 bw32(bp, B44_RXCONFIG, val);
1555 val = br32(bp, B44_CAM_CTRL);
1556 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1557 }
1558}
1559
1560static void b44_set_rx_mode(struct net_device *dev)
1561{
1562 struct b44 *bp = netdev_priv(dev);
1563
1564 spin_lock_irq(&bp->lock);
1565 __b44_set_rx_mode(dev);
1566 spin_unlock_irq(&bp->lock);
1567}
1568
1569static u32 b44_get_msglevel(struct net_device *dev)
1570{
1571 struct b44 *bp = netdev_priv(dev);
1572 return bp->msg_enable;
1573}
1574
1575static void b44_set_msglevel(struct net_device *dev, u32 value)
1576{
1577 struct b44 *bp = netdev_priv(dev);
1578 bp->msg_enable = value;
1579}
1580
1581static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1582{
1583 struct b44 *bp = netdev_priv(dev);
1584 struct pci_dev *pci_dev = bp->pdev;
1585
1586 strcpy (info->driver, DRV_MODULE_NAME);
1587 strcpy (info->version, DRV_MODULE_VERSION);
1588 strcpy (info->bus_info, pci_name(pci_dev));
1589}
1590
1591static int b44_nway_reset(struct net_device *dev)
1592{
1593 struct b44 *bp = netdev_priv(dev);
1594 u32 bmcr;
1595 int r;
1596
1597 spin_lock_irq(&bp->lock);
1598 b44_readphy(bp, MII_BMCR, &bmcr);
1599 b44_readphy(bp, MII_BMCR, &bmcr);
1600 r = -EINVAL;
1601 if (bmcr & BMCR_ANENABLE) {
1602 b44_writephy(bp, MII_BMCR,
1603 bmcr | BMCR_ANRESTART);
1604 r = 0;
1605 }
1606 spin_unlock_irq(&bp->lock);
1607
1608 return r;
1609}
1610
1611static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1612{
1613 struct b44 *bp = netdev_priv(dev);
1614
1615 if (!netif_running(dev))
1616 return -EAGAIN;
1617 cmd->supported = (SUPPORTED_Autoneg);
1618 cmd->supported |= (SUPPORTED_100baseT_Half |
1619 SUPPORTED_100baseT_Full |
1620 SUPPORTED_10baseT_Half |
1621 SUPPORTED_10baseT_Full |
1622 SUPPORTED_MII);
1623
1624 cmd->advertising = 0;
1625 if (bp->flags & B44_FLAG_ADV_10HALF)
1626 cmd->advertising |= ADVERTISED_10baseT_Half;
1627 if (bp->flags & B44_FLAG_ADV_10FULL)
1628 cmd->advertising |= ADVERTISED_10baseT_Full;
1629 if (bp->flags & B44_FLAG_ADV_100HALF)
1630 cmd->advertising |= ADVERTISED_100baseT_Half;
1631 if (bp->flags & B44_FLAG_ADV_100FULL)
1632 cmd->advertising |= ADVERTISED_100baseT_Full;
1633 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1634 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1635 SPEED_100 : SPEED_10;
1636 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1637 DUPLEX_FULL : DUPLEX_HALF;
1638 cmd->port = 0;
1639 cmd->phy_address = bp->phy_addr;
1640 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1641 XCVR_INTERNAL : XCVR_EXTERNAL;
1642 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1643 AUTONEG_DISABLE : AUTONEG_ENABLE;
1644 cmd->maxtxpkt = 0;
1645 cmd->maxrxpkt = 0;
1646 return 0;
1647}
1648
1649static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1650{
1651 struct b44 *bp = netdev_priv(dev);
1652
1653 if (!netif_running(dev))
1654 return -EAGAIN;
1655
1656 /* We do not support gigabit. */
1657 if (cmd->autoneg == AUTONEG_ENABLE) {
1658 if (cmd->advertising &
1659 (ADVERTISED_1000baseT_Half |
1660 ADVERTISED_1000baseT_Full))
1661 return -EINVAL;
1662 } else if ((cmd->speed != SPEED_100 &&
1663 cmd->speed != SPEED_10) ||
1664 (cmd->duplex != DUPLEX_HALF &&
1665 cmd->duplex != DUPLEX_FULL)) {
1666 return -EINVAL;
1667 }
1668
1669 spin_lock_irq(&bp->lock);
1670
1671 if (cmd->autoneg == AUTONEG_ENABLE) {
1672 bp->flags &= ~B44_FLAG_FORCE_LINK;
1673 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1674 B44_FLAG_ADV_10FULL |
1675 B44_FLAG_ADV_100HALF |
1676 B44_FLAG_ADV_100FULL);
1677 if (cmd->advertising & ADVERTISE_10HALF)
1678 bp->flags |= B44_FLAG_ADV_10HALF;
1679 if (cmd->advertising & ADVERTISE_10FULL)
1680 bp->flags |= B44_FLAG_ADV_10FULL;
1681 if (cmd->advertising & ADVERTISE_100HALF)
1682 bp->flags |= B44_FLAG_ADV_100HALF;
1683 if (cmd->advertising & ADVERTISE_100FULL)
1684 bp->flags |= B44_FLAG_ADV_100FULL;
1685 } else {
1686 bp->flags |= B44_FLAG_FORCE_LINK;
1687 if (cmd->speed == SPEED_100)
1688 bp->flags |= B44_FLAG_100_BASE_T;
1689 if (cmd->duplex == DUPLEX_FULL)
1690 bp->flags |= B44_FLAG_FULL_DUPLEX;
1691 }
1692
1693 b44_setup_phy(bp);
1694
1695 spin_unlock_irq(&bp->lock);
1696
1697 return 0;
1698}
1699
1700static void b44_get_ringparam(struct net_device *dev,
1701 struct ethtool_ringparam *ering)
1702{
1703 struct b44 *bp = netdev_priv(dev);
1704
1705 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1706 ering->rx_pending = bp->rx_pending;
1707
1708 /* XXX ethtool lacks a tx_max_pending, oops... */
1709}
1710
1711static int b44_set_ringparam(struct net_device *dev,
1712 struct ethtool_ringparam *ering)
1713{
1714 struct b44 *bp = netdev_priv(dev);
1715
1716 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1717 (ering->rx_mini_pending != 0) ||
1718 (ering->rx_jumbo_pending != 0) ||
1719 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1720 return -EINVAL;
1721
1722 spin_lock_irq(&bp->lock);
1723
1724 bp->rx_pending = ering->rx_pending;
1725 bp->tx_pending = ering->tx_pending;
1726
1727 b44_halt(bp);
1728 b44_init_rings(bp);
1729 b44_init_hw(bp);
1730 netif_wake_queue(bp->dev);
1731 spin_unlock_irq(&bp->lock);
1732
1733 b44_enable_ints(bp);
1734
1735 return 0;
1736}
1737
1738static void b44_get_pauseparam(struct net_device *dev,
1739 struct ethtool_pauseparam *epause)
1740{
1741 struct b44 *bp = netdev_priv(dev);
1742
1743 epause->autoneg =
1744 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1745 epause->rx_pause =
1746 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1747 epause->tx_pause =
1748 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1749}
1750
1751static int b44_set_pauseparam(struct net_device *dev,
1752 struct ethtool_pauseparam *epause)
1753{
1754 struct b44 *bp = netdev_priv(dev);
1755
1756 spin_lock_irq(&bp->lock);
1757 if (epause->autoneg)
1758 bp->flags |= B44_FLAG_PAUSE_AUTO;
1759 else
1760 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1761 if (epause->rx_pause)
1762 bp->flags |= B44_FLAG_RX_PAUSE;
1763 else
1764 bp->flags &= ~B44_FLAG_RX_PAUSE;
1765 if (epause->tx_pause)
1766 bp->flags |= B44_FLAG_TX_PAUSE;
1767 else
1768 bp->flags &= ~B44_FLAG_TX_PAUSE;
1769 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1770 b44_halt(bp);
1771 b44_init_rings(bp);
1772 b44_init_hw(bp);
1773 } else {
1774 __b44_set_flow_ctrl(bp, bp->flags);
1775 }
1776 spin_unlock_irq(&bp->lock);
1777
1778 b44_enable_ints(bp);
1779
1780 return 0;
1781}
1782
1783static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1784{
1785 switch(stringset) {
1786 case ETH_SS_STATS:
1787 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1788 break;
1789 }
1790}
1791
1792static int b44_get_stats_count(struct net_device *dev)
1793{
1794 return ARRAY_SIZE(b44_gstrings);
1795}
1796
1797static void b44_get_ethtool_stats(struct net_device *dev,
1798 struct ethtool_stats *stats, u64 *data)
1799{
1800 struct b44 *bp = netdev_priv(dev);
1801 u32 *val = &bp->hw_stats.tx_good_octets;
1802 u32 i;
1803
1804 spin_lock_irq(&bp->lock);
1805
1806 b44_stats_update(bp);
1807
1808 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1809 *data++ = *val++;
1810
1811 spin_unlock_irq(&bp->lock);
1812}
1813
1814static struct ethtool_ops b44_ethtool_ops = {
1815 .get_drvinfo = b44_get_drvinfo,
1816 .get_settings = b44_get_settings,
1817 .set_settings = b44_set_settings,
1818 .nway_reset = b44_nway_reset,
1819 .get_link = ethtool_op_get_link,
1820 .get_ringparam = b44_get_ringparam,
1821 .set_ringparam = b44_set_ringparam,
1822 .get_pauseparam = b44_get_pauseparam,
1823 .set_pauseparam = b44_set_pauseparam,
1824 .get_msglevel = b44_get_msglevel,
1825 .set_msglevel = b44_set_msglevel,
1826 .get_strings = b44_get_strings,
1827 .get_stats_count = b44_get_stats_count,
1828 .get_ethtool_stats = b44_get_ethtool_stats,
1829 .get_perm_addr = ethtool_op_get_perm_addr,
1830};
1831
1832static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1833{
1834 struct mii_ioctl_data *data = if_mii(ifr);
1835 struct b44 *bp = netdev_priv(dev);
1836 int err = -EINVAL;
1837
1838 if (!netif_running(dev))
1839 goto out;
1840
1841 spin_lock_irq(&bp->lock);
1842 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1843 spin_unlock_irq(&bp->lock);
1844out:
1845 return err;
1846}
1847
1848/* Read 128-bytes of EEPROM. */
1849static int b44_read_eeprom(struct b44 *bp, u8 *data)
1850{
1851 long i;
1852 u16 *ptr = (u16 *) data;
1853
1854 for (i = 0; i < 128; i += 2)
1855 ptr[i / 2] = readw(bp->regs + 4096 + i);
1856
1857 return 0;
1858}
1859
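/* Note the swapped byte pairs (79/78, 81/80, 83/82) used below: the
 * EEPROM is read 16 bits at a time by b44_read_eeprom(), which leaves
 * each pair of MAC address bytes reversed within its word.
 */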
1860static int __devinit b44_get_invariants(struct b44 *bp)
1861{
1862 u8 eeprom[128];
1863 int err;
1864
1865 err = b44_read_eeprom(bp, &eeprom[0]);
1866 if (err)
1867 goto out;
1868
1869 bp->dev->dev_addr[0] = eeprom[79];
1870 bp->dev->dev_addr[1] = eeprom[78];
1871 bp->dev->dev_addr[2] = eeprom[81];
1872 bp->dev->dev_addr[3] = eeprom[80];
1873 bp->dev->dev_addr[4] = eeprom[83];
1874 bp->dev->dev_addr[5] = eeprom[82];
1875
1876 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1877 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1878 return -EINVAL;
1879 }
1880
1881 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1882
1883 bp->phy_addr = eeprom[90] & 0x1f;
1884
1885 /* With this, plus the rx_header prepended to the data by the
1886 * hardware, we'll land the ethernet header on a 2-byte boundary.
1887 */
1888 bp->rx_offset = 30;
1889
1890 bp->imask = IMASK_DEF;
1891
1892 bp->core_unit = ssb_core_unit(bp);
1893 bp->dma_offset = SB_PCI_DMA;
1894
1895 /* XXX - really required?
1896 bp->flags |= B44_FLAG_BUGGY_TXPTR;
1897 */
1898out:
1899 return err;
1900}
1901
1902static int __devinit b44_init_one(struct pci_dev *pdev,
1903 const struct pci_device_id *ent)
1904{
1905 static int b44_version_printed = 0;
1906 unsigned long b44reg_base, b44reg_len;
1907 struct net_device *dev;
1908 struct b44 *bp;
1909 int err, i;
1910
1911 if (b44_version_printed++ == 0)
1912 printk(KERN_INFO "%s", version);
1913
1914 err = pci_enable_device(pdev);
1915 if (err) {
1916 printk(KERN_ERR PFX "Cannot enable PCI device, "
1917 "aborting.\n");
1918 return err;
1919 }
1920
1921 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1922 printk(KERN_ERR PFX "Cannot find proper PCI device "
1923 "base address, aborting.\n");
1924 err = -ENODEV;
1925 goto err_out_disable_pdev;
1926 }
1927
1928 err = pci_request_regions(pdev, DRV_MODULE_NAME);
1929 if (err) {
1930 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
1931 "aborting.\n");
1932 goto err_out_disable_pdev;
1933 }
1934
1935 pci_set_master(pdev);
1936
1937 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
1938 if (err) {
1939 printk(KERN_ERR PFX "No usable DMA configuration, "
1940 "aborting.\n");
1941 goto err_out_free_res;
1942 }
1943
1944 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
1945 if (err) {
1946 printk(KERN_ERR PFX "No usable DMA configuration, "
1947 "aborting.\n");
1948 goto err_out_free_res;
1949 }
1950
1951 b44reg_base = pci_resource_start(pdev, 0);
1952 b44reg_len = pci_resource_len(pdev, 0);
1953
1954 dev = alloc_etherdev(sizeof(*bp));
1955 if (!dev) {
1956 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1957 err = -ENOMEM;
1958 goto err_out_free_res;
1959 }
1960
1961 SET_MODULE_OWNER(dev);
1962 SET_NETDEV_DEV(dev,&pdev->dev);
1963
1964 /* No interesting netdevice features in this card... */
1965 dev->features |= 0;
1966
1967 bp = netdev_priv(dev);
1968 bp->pdev = pdev;
1969 bp->dev = dev;
1970
1971 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1972
1973 spin_lock_init(&bp->lock);
1974
1975 bp->regs = ioremap(b44reg_base, b44reg_len);
1976 if (!bp->regs) {
1977 printk(KERN_ERR PFX "Cannot map device registers, "
1978 "aborting.\n");
1979 err = -ENOMEM;
1980 goto err_out_free_dev;
1981 }
1982
1983 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1984 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1985
1986 dev->open = b44_open;
1987 dev->stop = b44_close;
1988 dev->hard_start_xmit = b44_start_xmit;
1989 dev->get_stats = b44_get_stats;
1990 dev->set_multicast_list = b44_set_rx_mode;
1991 dev->set_mac_address = b44_set_mac_addr;
1992 dev->do_ioctl = b44_ioctl;
1993 dev->tx_timeout = b44_tx_timeout;
1994 dev->poll = b44_poll;
1995 dev->weight = 64;
1996 dev->watchdog_timeo = B44_TX_TIMEOUT;
1997#ifdef CONFIG_NET_POLL_CONTROLLER
1998 dev->poll_controller = b44_poll_controller;
1999#endif
2000 dev->change_mtu = b44_change_mtu;
2001 dev->irq = pdev->irq;
2002 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2003
2004 netif_carrier_off(dev);
2005
2006 err = b44_get_invariants(bp);
2007 if (err) {
2008 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
2009 "aborting.\n");
2010 goto err_out_iounmap;
2011 }
2012
2013 bp->mii_if.dev = dev;
2014 bp->mii_if.mdio_read = b44_mii_read;
2015 bp->mii_if.mdio_write = b44_mii_write;
2016 bp->mii_if.phy_id = bp->phy_addr;
2017 bp->mii_if.phy_id_mask = 0x1f;
2018 bp->mii_if.reg_num_mask = 0x1f;
2019
2020 /* By default, advertise all speed/duplex settings. */
2021 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2022 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2023
2024 /* By default, auto-negotiate PAUSE. */
2025 bp->flags |= B44_FLAG_PAUSE_AUTO;
2026
2027 err = register_netdev(dev);
2028 if (err) {
2029 printk(KERN_ERR PFX "Cannot register net device, "
2030 "aborting.\n");
2031 goto err_out_iounmap;
2032 }
2033
2034 pci_set_drvdata(pdev, dev);
2035
2036 pci_save_state(bp->pdev);
2037
2038 /* Chip reset provides power to the b44 MAC & PCI cores, which
2039 * is necessary for MAC register access.
2040 */
2041 b44_chip_reset(bp);
2042
2043 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2044 for (i = 0; i < 6; i++)
2045 printk("%2.2x%c", dev->dev_addr[i],
2046 i == 5 ? '\n' : ':');
2047
2048 return 0;
2049
2050err_out_iounmap:
2051 iounmap(bp->regs);
2052
2053err_out_free_dev:
2054 free_netdev(dev);
2055
2056err_out_free_res:
2057 pci_release_regions(pdev);
2058
2059err_out_disable_pdev:
2060 pci_disable_device(pdev);
2061 pci_set_drvdata(pdev, NULL);
2062 return err;
2063}
2064
2065static void __devexit b44_remove_one(struct pci_dev *pdev)
2066{
2067 struct net_device *dev = pci_get_drvdata(pdev);
2068 struct b44 *bp = netdev_priv(dev);
2069
2070 unregister_netdev(dev);
2071 iounmap(bp->regs);
2072 free_netdev(dev);
2073 pci_release_regions(pdev);
2074 pci_disable_device(pdev);
2075 pci_set_drvdata(pdev, NULL);
2076}
2077
2078static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2079{
2080 struct net_device *dev = pci_get_drvdata(pdev);
2081 struct b44 *bp = netdev_priv(dev);
2082
2083 if (!netif_running(dev))
2084 return 0;
2085
2086 del_timer_sync(&bp->timer);
2087
2088 spin_lock_irq(&bp->lock);
2089
2090 b44_halt(bp);
2091 netif_carrier_off(bp->dev);
2092 netif_device_detach(bp->dev);
2093 b44_free_rings(bp);
2094
2095 spin_unlock_irq(&bp->lock);
2096
2097 free_irq(dev->irq, dev);
2098 pci_disable_device(pdev);
2099 return 0;
2100}
2101
2102static int b44_resume(struct pci_dev *pdev)
2103{
2104 struct net_device *dev = pci_get_drvdata(pdev);
2105 struct b44 *bp = netdev_priv(dev);
2106
2107 pci_restore_state(pdev);
2108 pci_enable_device(pdev);
2109 pci_set_master(pdev);
2110
2111 if (!netif_running(dev))
2112 return 0;
2113
2114 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2115 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2116
2117 spin_lock_irq(&bp->lock);
2118
2119 b44_init_rings(bp);
2120 b44_init_hw(bp);
2121 netif_device_attach(bp->dev);
2122 spin_unlock_irq(&bp->lock);
2123
2124 bp->timer.expires = jiffies + HZ;
2125 add_timer(&bp->timer);
2126
2127 b44_enable_ints(bp);
2128 netif_wake_queue(dev);
2129 return 0;
2130}
2131
2132static struct pci_driver b44_driver = {
2133 .name = DRV_MODULE_NAME,
2134 .id_table = b44_pci_tbl,
2135 .probe = b44_init_one,
2136 .remove = __devexit_p(b44_remove_one),
2137 .suspend = b44_suspend,
2138 .resume = b44_resume,
2139};
2140
2141static int __init b44_init(void)
2142{
2143 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2144
2145 /* Set up parameters for syncing RX/TX DMA descriptors */
2146 dma_desc_align_mask = ~(dma_desc_align_size - 1);
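/* For example, assuming a 32-byte cache line: dma_desc_align_size is
 * 32, the mask above becomes ~31, and since struct dma_desc carries
 * just the ctrl and addr words (8 bytes), the sync size below works
 * out to 32.
 */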
2147 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2148
2149 return pci_module_init(&b44_driver);
2150}
2151
2152static void __exit b44_cleanup(void)
2153{
2154 pci_unregister_driver(&b44_driver);
2155}
2156
2157module_init(b44_init);
2158module_exit(b44_cleanup);
2159