]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
[TG3]: Support shutdown WoL.
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.49"
73 #define DRV_MODULE_RELDATE      "Feb 2, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { 0, }
253 };
254
255 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
256
257 static struct {
258         const char string[ETH_GSTRING_LEN];
259 } ethtool_stats_keys[TG3_NUM_STATS] = {
260         { "rx_octets" },
261         { "rx_fragments" },
262         { "rx_ucast_packets" },
263         { "rx_mcast_packets" },
264         { "rx_bcast_packets" },
265         { "rx_fcs_errors" },
266         { "rx_align_errors" },
267         { "rx_xon_pause_rcvd" },
268         { "rx_xoff_pause_rcvd" },
269         { "rx_mac_ctrl_rcvd" },
270         { "rx_xoff_entered" },
271         { "rx_frame_too_long_errors" },
272         { "rx_jabbers" },
273         { "rx_undersize_packets" },
274         { "rx_in_length_errors" },
275         { "rx_out_length_errors" },
276         { "rx_64_or_less_octet_packets" },
277         { "rx_65_to_127_octet_packets" },
278         { "rx_128_to_255_octet_packets" },
279         { "rx_256_to_511_octet_packets" },
280         { "rx_512_to_1023_octet_packets" },
281         { "rx_1024_to_1522_octet_packets" },
282         { "rx_1523_to_2047_octet_packets" },
283         { "rx_2048_to_4095_octet_packets" },
284         { "rx_4096_to_8191_octet_packets" },
285         { "rx_8192_to_9022_octet_packets" },
286
287         { "tx_octets" },
288         { "tx_collisions" },
289
290         { "tx_xon_sent" },
291         { "tx_xoff_sent" },
292         { "tx_flow_control" },
293         { "tx_mac_errors" },
294         { "tx_single_collisions" },
295         { "tx_mult_collisions" },
296         { "tx_deferred" },
297         { "tx_excessive_collisions" },
298         { "tx_late_collisions" },
299         { "tx_collide_2times" },
300         { "tx_collide_3times" },
301         { "tx_collide_4times" },
302         { "tx_collide_5times" },
303         { "tx_collide_6times" },
304         { "tx_collide_7times" },
305         { "tx_collide_8times" },
306         { "tx_collide_9times" },
307         { "tx_collide_10times" },
308         { "tx_collide_11times" },
309         { "tx_collide_12times" },
310         { "tx_collide_13times" },
311         { "tx_collide_14times" },
312         { "tx_collide_15times" },
313         { "tx_ucast_packets" },
314         { "tx_mcast_packets" },
315         { "tx_bcast_packets" },
316         { "tx_carrier_sense_errors" },
317         { "tx_discards" },
318         { "tx_errors" },
319
320         { "dma_writeq_full" },
321         { "dma_write_prioq_full" },
322         { "rxbds_empty" },
323         { "rx_discards" },
324         { "rx_errors" },
325         { "rx_threshold_hit" },
326
327         { "dma_readq_full" },
328         { "dma_read_prioq_full" },
329         { "tx_comp_queue_full" },
330
331         { "ring_set_send_prod_index" },
332         { "ring_status_update" },
333         { "nic_irqs" },
334         { "nic_avoided_irqs" },
335         { "nic_tx_threshold_hit" }
336 };
337
338 static struct {
339         const char string[ETH_GSTRING_LEN];
340 } ethtool_test_keys[TG3_NUM_TEST] = {
341         { "nvram test     (online) " },
342         { "link test      (online) " },
343         { "register test  (offline)" },
344         { "memory test    (offline)" },
345         { "loopback test  (offline)" },
346         { "interrupt test (offline)" },
347 };
348
349 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
350 {
351         writel(val, tp->regs + off);
352 }
353
354 static u32 tg3_read32(struct tg3 *tp, u32 off)
355 {
356         return (readl(tp->regs + off)); 
357 }
358
359 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
360 {
361         unsigned long flags;
362
363         spin_lock_irqsave(&tp->indirect_lock, flags);
364         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
365         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
366         spin_unlock_irqrestore(&tp->indirect_lock, flags);
367 }
368
369 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
370 {
371         writel(val, tp->regs + off);
372         readl(tp->regs + off);
373 }
374
375 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
376 {
377         unsigned long flags;
378         u32 val;
379
380         spin_lock_irqsave(&tp->indirect_lock, flags);
381         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
382         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
383         spin_unlock_irqrestore(&tp->indirect_lock, flags);
384         return val;
385 }
386
387 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
388 {
389         unsigned long flags;
390
391         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
392                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
393                                        TG3_64BIT_REG_LOW, val);
394                 return;
395         }
396         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
397                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
398                                        TG3_64BIT_REG_LOW, val);
399                 return;
400         }
401
402         spin_lock_irqsave(&tp->indirect_lock, flags);
403         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
404         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
405         spin_unlock_irqrestore(&tp->indirect_lock, flags);
406
407         /* In indirect mode when disabling interrupts, we also need
408          * to clear the interrupt bit in the GRC local ctrl register.
409          */
410         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
411             (val == 0x1)) {
412                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
413                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
414         }
415 }
416
417 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
418 {
419         unsigned long flags;
420         u32 val;
421
422         spin_lock_irqsave(&tp->indirect_lock, flags);
423         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
424         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
425         spin_unlock_irqrestore(&tp->indirect_lock, flags);
426         return val;
427 }
428
429 /* usec_wait specifies the wait time in usec when writing to certain registers
430  * where it is unsafe to read back the register without some delay.
431  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
432  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
433  */
434 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
435 {
436         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
437             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438                 /* Non-posted methods */
439                 tp->write32(tp, off, val);
440         else {
441                 /* Posted method */
442                 tg3_write32(tp, off, val);
443                 if (usec_wait)
444                         udelay(usec_wait);
445                 tp->read32(tp, off);
446         }
447         /* Wait again after the read for the posted method to guarantee that
448          * the wait time is met.
449          */
450         if (usec_wait)
451                 udelay(usec_wait);
452 }
453
454 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
455 {
456         tp->write32_mbox(tp, off, val);
457         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
458             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
459                 tp->read32_mbox(tp, off);
460 }
461
462 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
463 {
464         void __iomem *mbox = tp->regs + off;
465         writel(val, mbox);
466         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
467                 writel(val, mbox);
468         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
469                 readl(mbox);
470 }
471
472 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
473 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
474 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
475 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
476 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
477
478 #define tw32(reg,val)           tp->write32(tp, reg, val)
479 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
480 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
481 #define tr32(reg)               tp->read32(tp, reg)
482
483 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
484 {
485         unsigned long flags;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
489         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
490
491         /* Always leave this as zero. */
492         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
493         spin_unlock_irqrestore(&tp->indirect_lock, flags);
494 }
495
496 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
497 {
498         /* If no workaround is needed, write to mem space directly */
499         if (tp->write32 != tg3_write_indirect_reg32)
500                 tw32(NIC_SRAM_WIN_BASE + off, val);
501         else
502                 tg3_write_mem(tp, off, val);
503 }
504
505 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
506 {
507         unsigned long flags;
508
509         spin_lock_irqsave(&tp->indirect_lock, flags);
510         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
511         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
512
513         /* Always leave this as zero. */
514         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
515         spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 }
517
518 static void tg3_disable_ints(struct tg3 *tp)
519 {
520         tw32(TG3PCI_MISC_HOST_CTRL,
521              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
522         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
523 }
524
525 static inline void tg3_cond_int(struct tg3 *tp)
526 {
527         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
528             (tp->hw_status->status & SD_STATUS_UPDATED))
529                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
530 }
531
532 static void tg3_enable_ints(struct tg3 *tp)
533 {
534         tp->irq_sync = 0;
535         wmb();
536
537         tw32(TG3PCI_MISC_HOST_CTRL,
538              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
539         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
540                        (tp->last_tag << 24));
541         tg3_cond_int(tp);
542 }
543
544 static inline unsigned int tg3_has_work(struct tg3 *tp)
545 {
546         struct tg3_hw_status *sblk = tp->hw_status;
547         unsigned int work_exists = 0;
548
549         /* check for phy events */
550         if (!(tp->tg3_flags &
551               (TG3_FLAG_USE_LINKCHG_REG |
552                TG3_FLAG_POLL_SERDES))) {
553                 if (sblk->status & SD_STATUS_LINK_CHG)
554                         work_exists = 1;
555         }
556         /* check for RX/TX work to do */
557         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
558             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
559                 work_exists = 1;
560
561         return work_exists;
562 }
563
564 /* tg3_restart_ints
565  *  similar to tg3_enable_ints, but it accurately determines whether there
566  *  is new work pending and can return without flushing the PIO write
567  *  which reenables interrupts 
568  */
569 static void tg3_restart_ints(struct tg3 *tp)
570 {
571         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
572                      tp->last_tag << 24);
573         mmiowb();
574
575         /* When doing tagged status, this work check is unnecessary.
576          * The last_tag we write above tells the chip which piece of
577          * work we've completed.
578          */
579         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
580             tg3_has_work(tp))
581                 tw32(HOSTCC_MODE, tp->coalesce_mode |
582                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
583 }
584
585 static inline void tg3_netif_stop(struct tg3 *tp)
586 {
587         tp->dev->trans_start = jiffies; /* prevent tx timeout */
588         netif_poll_disable(tp->dev);
589         netif_tx_disable(tp->dev);
590 }
591
592 static inline void tg3_netif_start(struct tg3 *tp)
593 {
594         netif_wake_queue(tp->dev);
595         /* NOTE: unconditional netif_wake_queue is only appropriate
596          * so long as all callers are assured to have free tx slots
597          * (such as after tg3_init_hw)
598          */
599         netif_poll_enable(tp->dev);
600         tp->hw_status->status |= SD_STATUS_UPDATED;
601         tg3_enable_ints(tp);
602 }
603
604 static void tg3_switch_clocks(struct tg3 *tp)
605 {
606         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
607         u32 orig_clock_ctrl;
608
609         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
610                 return;
611
612         orig_clock_ctrl = clock_ctrl;
613         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
614                        CLOCK_CTRL_CLKRUN_OENABLE |
615                        0x1f);
616         tp->pci_clock_ctrl = clock_ctrl;
617
618         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
619                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
620                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
621                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
622                 }
623         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
624                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
625                             clock_ctrl |
626                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
627                             40);
628                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
629                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
630                             40);
631         }
632         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
633 }
634
635 #define PHY_BUSY_LOOPS  5000
636
637 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
638 {
639         u32 frame_val;
640         unsigned int loops;
641         int ret;
642
643         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
644                 tw32_f(MAC_MI_MODE,
645                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
646                 udelay(80);
647         }
648
649         *val = 0x0;
650
651         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
652                       MI_COM_PHY_ADDR_MASK);
653         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
654                       MI_COM_REG_ADDR_MASK);
655         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
656         
657         tw32_f(MAC_MI_COM, frame_val);
658
659         loops = PHY_BUSY_LOOPS;
660         while (loops != 0) {
661                 udelay(10);
662                 frame_val = tr32(MAC_MI_COM);
663
664                 if ((frame_val & MI_COM_BUSY) == 0) {
665                         udelay(5);
666                         frame_val = tr32(MAC_MI_COM);
667                         break;
668                 }
669                 loops -= 1;
670         }
671
672         ret = -EBUSY;
673         if (loops != 0) {
674                 *val = frame_val & MI_COM_DATA_MASK;
675                 ret = 0;
676         }
677
678         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
679                 tw32_f(MAC_MI_MODE, tp->mi_mode);
680                 udelay(80);
681         }
682
683         return ret;
684 }
685
686 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
687 {
688         u32 frame_val;
689         unsigned int loops;
690         int ret;
691
692         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
693                 tw32_f(MAC_MI_MODE,
694                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
695                 udelay(80);
696         }
697
698         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
699                       MI_COM_PHY_ADDR_MASK);
700         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
701                       MI_COM_REG_ADDR_MASK);
702         frame_val |= (val & MI_COM_DATA_MASK);
703         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
704         
705         tw32_f(MAC_MI_COM, frame_val);
706
707         loops = PHY_BUSY_LOOPS;
708         while (loops != 0) {
709                 udelay(10);
710                 frame_val = tr32(MAC_MI_COM);
711                 if ((frame_val & MI_COM_BUSY) == 0) {
712                         udelay(5);
713                         frame_val = tr32(MAC_MI_COM);
714                         break;
715                 }
716                 loops -= 1;
717         }
718
719         ret = -EBUSY;
720         if (loops != 0)
721                 ret = 0;
722
723         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
724                 tw32_f(MAC_MI_MODE, tp->mi_mode);
725                 udelay(80);
726         }
727
728         return ret;
729 }
730
731 static void tg3_phy_set_wirespeed(struct tg3 *tp)
732 {
733         u32 val;
734
735         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
736                 return;
737
738         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
739             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
740                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
741                              (val | (1 << 15) | (1 << 4)));
742 }
743
744 static int tg3_bmcr_reset(struct tg3 *tp)
745 {
746         u32 phy_control;
747         int limit, err;
748
749         /* OK, reset it, and poll the BMCR_RESET bit until it
750          * clears or we time out.
751          */
752         phy_control = BMCR_RESET;
753         err = tg3_writephy(tp, MII_BMCR, phy_control);
754         if (err != 0)
755                 return -EBUSY;
756
757         limit = 5000;
758         while (limit--) {
759                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
760                 if (err != 0)
761                         return -EBUSY;
762
763                 if ((phy_control & BMCR_RESET) == 0) {
764                         udelay(40);
765                         break;
766                 }
767                 udelay(10);
768         }
769         if (limit <= 0)
770                 return -EBUSY;
771
772         return 0;
773 }
774
775 static int tg3_wait_macro_done(struct tg3 *tp)
776 {
777         int limit = 100;
778
779         while (limit--) {
780                 u32 tmp32;
781
782                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
783                         if ((tmp32 & 0x1000) == 0)
784                                 break;
785                 }
786         }
787         if (limit <= 0)
788                 return -EBUSY;
789
790         return 0;
791 }
792
793 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
794 {
795         static const u32 test_pat[4][6] = {
796         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
797         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
798         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
799         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
800         };
801         int chan;
802
803         for (chan = 0; chan < 4; chan++) {
804                 int i;
805
806                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
807                              (chan * 0x2000) | 0x0200);
808                 tg3_writephy(tp, 0x16, 0x0002);
809
810                 for (i = 0; i < 6; i++)
811                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
812                                      test_pat[chan][i]);
813
814                 tg3_writephy(tp, 0x16, 0x0202);
815                 if (tg3_wait_macro_done(tp)) {
816                         *resetp = 1;
817                         return -EBUSY;
818                 }
819
820                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
821                              (chan * 0x2000) | 0x0200);
822                 tg3_writephy(tp, 0x16, 0x0082);
823                 if (tg3_wait_macro_done(tp)) {
824                         *resetp = 1;
825                         return -EBUSY;
826                 }
827
828                 tg3_writephy(tp, 0x16, 0x0802);
829                 if (tg3_wait_macro_done(tp)) {
830                         *resetp = 1;
831                         return -EBUSY;
832                 }
833
834                 for (i = 0; i < 6; i += 2) {
835                         u32 low, high;
836
837                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
838                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
839                             tg3_wait_macro_done(tp)) {
840                                 *resetp = 1;
841                                 return -EBUSY;
842                         }
843                         low &= 0x7fff;
844                         high &= 0x000f;
845                         if (low != test_pat[chan][i] ||
846                             high != test_pat[chan][i+1]) {
847                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
848                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
849                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
850
851                                 return -EBUSY;
852                         }
853                 }
854         }
855
856         return 0;
857 }
858
859 static int tg3_phy_reset_chanpat(struct tg3 *tp)
860 {
861         int chan;
862
863         for (chan = 0; chan < 4; chan++) {
864                 int i;
865
866                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
867                              (chan * 0x2000) | 0x0200);
868                 tg3_writephy(tp, 0x16, 0x0002);
869                 for (i = 0; i < 6; i++)
870                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
871                 tg3_writephy(tp, 0x16, 0x0202);
872                 if (tg3_wait_macro_done(tp))
873                         return -EBUSY;
874         }
875
876         return 0;
877 }
878
879 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
880 {
881         u32 reg32, phy9_orig;
882         int retries, do_phy_reset, err;
883
884         retries = 10;
885         do_phy_reset = 1;
886         do {
887                 if (do_phy_reset) {
888                         err = tg3_bmcr_reset(tp);
889                         if (err)
890                                 return err;
891                         do_phy_reset = 0;
892                 }
893
894                 /* Disable transmitter and interrupt.  */
895                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
896                         continue;
897
898                 reg32 |= 0x3000;
899                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
900
901                 /* Set full-duplex, 1000 mbps.  */
902                 tg3_writephy(tp, MII_BMCR,
903                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
904
905                 /* Set to master mode.  */
906                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
907                         continue;
908
909                 tg3_writephy(tp, MII_TG3_CTRL,
910                              (MII_TG3_CTRL_AS_MASTER |
911                               MII_TG3_CTRL_ENABLE_AS_MASTER));
912
913                 /* Enable SM_DSP_CLOCK and 6dB.  */
914                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
915
916                 /* Block the PHY control access.  */
917                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
918                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
919
920                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
921                 if (!err)
922                         break;
923         } while (--retries);
924
925         err = tg3_phy_reset_chanpat(tp);
926         if (err)
927                 return err;
928
929         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
930         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
931
932         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
933         tg3_writephy(tp, 0x16, 0x0000);
934
935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
937                 /* Set Extended packet length bit for jumbo frames */
938                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
939         }
940         else {
941                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
942         }
943
944         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
945
946         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
947                 reg32 &= ~0x3000;
948                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
949         } else if (!err)
950                 err = -EBUSY;
951
952         return err;
953 }
954
955 /* This will reset the tigon3 PHY if there is no valid
956  * link unless the FORCE argument is non-zero.
957  */
958 static int tg3_phy_reset(struct tg3 *tp)
959 {
960         u32 phy_status;
961         int err;
962
963         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
964         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
965         if (err != 0)
966                 return -EBUSY;
967
968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
969             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
971                 err = tg3_phy_reset_5703_4_5(tp);
972                 if (err)
973                         return err;
974                 goto out;
975         }
976
977         err = tg3_bmcr_reset(tp);
978         if (err)
979                 return err;
980
981 out:
982         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
983                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
984                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
985                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
986                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
987                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
988                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
989         }
990         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
991                 tg3_writephy(tp, 0x1c, 0x8d68);
992                 tg3_writephy(tp, 0x1c, 0x8d68);
993         }
994         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
995                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
996                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
997                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
998                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
999                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1000                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1001                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1002                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1003         }
1004         /* Set Extended packet length bit (bit 14) on all chips that */
1005         /* support jumbo frames */
1006         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1007                 /* Cannot do read-modify-write on 5401 */
1008                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1009         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1010                 u32 phy_reg;
1011
1012                 /* Set bit 14 with read-modify-write to preserve other bits */
1013                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1014                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1015                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1016         }
1017
1018         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1019          * jumbo frames transmission.
1020          */
1021         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1022                 u32 phy_reg;
1023
1024                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1025                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1026                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1027         }
1028
1029         tg3_phy_set_wirespeed(tp);
1030         return 0;
1031 }
1032
1033 static void tg3_frob_aux_power(struct tg3 *tp)
1034 {
1035         struct tg3 *tp_peer = tp;
1036
1037         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1038                 return;
1039
1040         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1041             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1042                 struct net_device *dev_peer;
1043
1044                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1045                 /* remove_one() may have been run on the peer. */
1046                 if (!dev_peer)
1047                         tp_peer = tp;
1048                 else
1049                         tp_peer = netdev_priv(dev_peer);
1050         }
1051
1052         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1053             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1054             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1055             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1056                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1057                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1058                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1059                                     (GRC_LCLCTRL_GPIO_OE0 |
1060                                      GRC_LCLCTRL_GPIO_OE1 |
1061                                      GRC_LCLCTRL_GPIO_OE2 |
1062                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1063                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1064                                     100);
1065                 } else {
1066                         u32 no_gpio2;
1067                         u32 grc_local_ctrl = 0;
1068
1069                         if (tp_peer != tp &&
1070                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1071                                 return;
1072
1073                         /* Workaround to prevent overdrawing Amps. */
1074                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1075                             ASIC_REV_5714) {
1076                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1077                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1078                                             grc_local_ctrl, 100);
1079                         }
1080
1081                         /* On 5753 and variants, GPIO2 cannot be used. */
1082                         no_gpio2 = tp->nic_sram_data_cfg &
1083                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1084
1085                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1086                                          GRC_LCLCTRL_GPIO_OE1 |
1087                                          GRC_LCLCTRL_GPIO_OE2 |
1088                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1089                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1090                         if (no_gpio2) {
1091                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1092                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1093                         }
1094                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095                                                     grc_local_ctrl, 100);
1096
1097                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1098
1099                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1100                                                     grc_local_ctrl, 100);
1101
1102                         if (!no_gpio2) {
1103                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1104                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1105                                             grc_local_ctrl, 100);
1106                         }
1107                 }
1108         } else {
1109                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1110                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1111                         if (tp_peer != tp &&
1112                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1113                                 return;
1114
1115                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1116                                     (GRC_LCLCTRL_GPIO_OE1 |
1117                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1118
1119                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1120                                     GRC_LCLCTRL_GPIO_OE1, 100);
1121
1122                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1123                                     (GRC_LCLCTRL_GPIO_OE1 |
1124                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1125                 }
1126         }
1127 }
1128
1129 static int tg3_setup_phy(struct tg3 *, int);
1130
1131 #define RESET_KIND_SHUTDOWN     0
1132 #define RESET_KIND_INIT         1
1133 #define RESET_KIND_SUSPEND      2
1134
1135 static void tg3_write_sig_post_reset(struct tg3 *, int);
1136 static int tg3_halt_cpu(struct tg3 *, u32);
1137 static int tg3_nvram_lock(struct tg3 *);
1138 static void tg3_nvram_unlock(struct tg3 *);
1139
1140 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1141 {
1142         u32 misc_host_ctrl;
1143         u16 power_control, power_caps;
1144         int pm = tp->pm_cap;
1145
1146         /* Make sure register accesses (indirect or otherwise)
1147          * will function correctly.
1148          */
1149         pci_write_config_dword(tp->pdev,
1150                                TG3PCI_MISC_HOST_CTRL,
1151                                tp->misc_host_ctrl);
1152
1153         pci_read_config_word(tp->pdev,
1154                              pm + PCI_PM_CTRL,
1155                              &power_control);
1156         power_control |= PCI_PM_CTRL_PME_STATUS;
1157         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1158         switch (state) {
1159         case PCI_D0:
1160                 power_control |= 0;
1161                 pci_write_config_word(tp->pdev,
1162                                       pm + PCI_PM_CTRL,
1163                                       power_control);
1164                 udelay(100);    /* Delay after power state change */
1165
1166                 /* Switch out of Vaux if it is not a LOM */
1167                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1168                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1169
1170                 return 0;
1171
1172         case PCI_D1:
1173                 power_control |= 1;
1174                 break;
1175
1176         case PCI_D2:
1177                 power_control |= 2;
1178                 break;
1179
1180         case PCI_D3hot:
1181                 power_control |= 3;
1182                 break;
1183
1184         default:
1185                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1186                        "requested.\n",
1187                        tp->dev->name, state);
1188                 return -EINVAL;
1189         };
1190
1191         power_control |= PCI_PM_CTRL_PME_ENABLE;
1192
1193         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1194         tw32(TG3PCI_MISC_HOST_CTRL,
1195              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1196
1197         if (tp->link_config.phy_is_low_power == 0) {
1198                 tp->link_config.phy_is_low_power = 1;
1199                 tp->link_config.orig_speed = tp->link_config.speed;
1200                 tp->link_config.orig_duplex = tp->link_config.duplex;
1201                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1202         }
1203
1204         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1205                 tp->link_config.speed = SPEED_10;
1206                 tp->link_config.duplex = DUPLEX_HALF;
1207                 tp->link_config.autoneg = AUTONEG_ENABLE;
1208                 tg3_setup_phy(tp, 0);
1209         }
1210
1211         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1212                 int i;
1213                 u32 val;
1214
1215                 for (i = 0; i < 200; i++) {
1216                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1217                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1218                                 break;
1219                         msleep(1);
1220                 }
1221         }
1222         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1223                                              WOL_DRV_STATE_SHUTDOWN |
1224                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1225
1226         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1227
1228         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1229                 u32 mac_mode;
1230
1231                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1232                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1233                         udelay(40);
1234
1235                         mac_mode = MAC_MODE_PORT_MODE_MII;
1236
1237                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1238                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1239                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1240                 } else {
1241                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1242                 }
1243
1244                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1245                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1246
1247                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1248                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1249                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1250
1251                 tw32_f(MAC_MODE, mac_mode);
1252                 udelay(100);
1253
1254                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1255                 udelay(10);
1256         }
1257
1258         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1259             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1260              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1261                 u32 base_val;
1262
1263                 base_val = tp->pci_clock_ctrl;
1264                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1265                              CLOCK_CTRL_TXCLK_DISABLE);
1266
1267                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1268                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1269         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1270                 /* do nothing */
1271         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1272                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1273                 u32 newbits1, newbits2;
1274
1275                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1276                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1277                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1278                                     CLOCK_CTRL_TXCLK_DISABLE |
1279                                     CLOCK_CTRL_ALTCLK);
1280                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1281                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1282                         newbits1 = CLOCK_CTRL_625_CORE;
1283                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1284                 } else {
1285                         newbits1 = CLOCK_CTRL_ALTCLK;
1286                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1287                 }
1288
1289                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1290                             40);
1291
1292                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1293                             40);
1294
1295                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1296                         u32 newbits3;
1297
1298                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1299                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1300                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1301                                             CLOCK_CTRL_TXCLK_DISABLE |
1302                                             CLOCK_CTRL_44MHZ_CORE);
1303                         } else {
1304                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1305                         }
1306
1307                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1308                                     tp->pci_clock_ctrl | newbits3, 40);
1309                 }
1310         }
1311
1312         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1313             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1314                 /* Turn off the PHY */
1315                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1316                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1317                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1318                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1319                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1320                                 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1321                 }
1322         }
1323
1324         tg3_frob_aux_power(tp);
1325
1326         /* Workaround for unstable PLL clock */
1327         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1328             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1329                 u32 val = tr32(0x7d00);
1330
1331                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1332                 tw32(0x7d00, val);
1333                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1334                         int err;
1335
1336                         err = tg3_nvram_lock(tp);
1337                         tg3_halt_cpu(tp, RX_CPU_BASE);
1338                         if (!err)
1339                                 tg3_nvram_unlock(tp);
1340                 }
1341         }
1342
1343         /* Finally, set the new power state. */
1344         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1345         udelay(100);    /* Delay after power state change */
1346
1347         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1348
1349         return 0;
1350 }
1351
1352 static void tg3_link_report(struct tg3 *tp)
1353 {
1354         if (!netif_carrier_ok(tp->dev)) {
1355                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1356         } else {
1357                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1358                        tp->dev->name,
1359                        (tp->link_config.active_speed == SPEED_1000 ?
1360                         1000 :
1361                         (tp->link_config.active_speed == SPEED_100 ?
1362                          100 : 10)),
1363                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1364                         "full" : "half"));
1365
1366                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1367                        "%s for RX.\n",
1368                        tp->dev->name,
1369                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1370                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1371         }
1372 }
1373
1374 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1375 {
1376         u32 new_tg3_flags = 0;
1377         u32 old_rx_mode = tp->rx_mode;
1378         u32 old_tx_mode = tp->tx_mode;
1379
1380         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1381
1382                 /* Convert 1000BaseX flow control bits to 1000BaseT
1383                  * bits before resolving flow control.
1384                  */
1385                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1386                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1387                                        ADVERTISE_PAUSE_ASYM);
1388                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1389
1390                         if (local_adv & ADVERTISE_1000XPAUSE)
1391                                 local_adv |= ADVERTISE_PAUSE_CAP;
1392                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1393                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1394                         if (remote_adv & LPA_1000XPAUSE)
1395                                 remote_adv |= LPA_PAUSE_CAP;
1396                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1397                                 remote_adv |= LPA_PAUSE_ASYM;
1398                 }
1399
1400                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1401                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1402                                 if (remote_adv & LPA_PAUSE_CAP)
1403                                         new_tg3_flags |=
1404                                                 (TG3_FLAG_RX_PAUSE |
1405                                                 TG3_FLAG_TX_PAUSE);
1406                                 else if (remote_adv & LPA_PAUSE_ASYM)
1407                                         new_tg3_flags |=
1408                                                 (TG3_FLAG_RX_PAUSE);
1409                         } else {
1410                                 if (remote_adv & LPA_PAUSE_CAP)
1411                                         new_tg3_flags |=
1412                                                 (TG3_FLAG_RX_PAUSE |
1413                                                 TG3_FLAG_TX_PAUSE);
1414                         }
1415                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1416                         if ((remote_adv & LPA_PAUSE_CAP) &&
1417                         (remote_adv & LPA_PAUSE_ASYM))
1418                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1419                 }
1420
1421                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1422                 tp->tg3_flags |= new_tg3_flags;
1423         } else {
1424                 new_tg3_flags = tp->tg3_flags;
1425         }
1426
1427         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1428                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1429         else
1430                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1431
1432         if (old_rx_mode != tp->rx_mode) {
1433                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1434         }
1435
1436         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1437                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1438         else
1439                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1440
1441         if (old_tx_mode != tp->tx_mode) {
1442                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1443         }
1444 }
1445
1446 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1447 {
1448         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1449         case MII_TG3_AUX_STAT_10HALF:
1450                 *speed = SPEED_10;
1451                 *duplex = DUPLEX_HALF;
1452                 break;
1453
1454         case MII_TG3_AUX_STAT_10FULL:
1455                 *speed = SPEED_10;
1456                 *duplex = DUPLEX_FULL;
1457                 break;
1458
1459         case MII_TG3_AUX_STAT_100HALF:
1460                 *speed = SPEED_100;
1461                 *duplex = DUPLEX_HALF;
1462                 break;
1463
1464         case MII_TG3_AUX_STAT_100FULL:
1465                 *speed = SPEED_100;
1466                 *duplex = DUPLEX_FULL;
1467                 break;
1468
1469         case MII_TG3_AUX_STAT_1000HALF:
1470                 *speed = SPEED_1000;
1471                 *duplex = DUPLEX_HALF;
1472                 break;
1473
1474         case MII_TG3_AUX_STAT_1000FULL:
1475                 *speed = SPEED_1000;
1476                 *duplex = DUPLEX_FULL;
1477                 break;
1478
1479         default:
1480                 *speed = SPEED_INVALID;
1481                 *duplex = DUPLEX_INVALID;
1482                 break;
1483         }
1484 }
1485
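     /* Program the copper PHY advertisement registers from
      * tp->link_config: a reduced 10/100 advertisement when entering
      * low power mode, the full set when no specific speed is
      * requested, or a single forced mode.  Then either force BMCR
      * speed/duplex or restart autonegotiation.
      */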
1486 static void tg3_phy_copper_begin(struct tg3 *tp)
1487 {
1488         u32 new_adv;
1489         int i;
1490
1491         if (tp->link_config.phy_is_low_power) {
1492                 /* Entering low power mode.  Disable gigabit and
1493                  * 100baseT advertisements.
1494                  */
1495                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1496
1497                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1498                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1499                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1500                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1501
1502                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1503         } else if (tp->link_config.speed == SPEED_INVALID) {
1504                 tp->link_config.advertising =
1505                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1506                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1507                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1508                          ADVERTISED_Autoneg | ADVERTISED_MII);
1509
1510                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1511                         tp->link_config.advertising &=
1512                                 ~(ADVERTISED_1000baseT_Half |
1513                                   ADVERTISED_1000baseT_Full);
1514
1515                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1516                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1517                         new_adv |= ADVERTISE_10HALF;
1518                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1519                         new_adv |= ADVERTISE_10FULL;
1520                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1521                         new_adv |= ADVERTISE_100HALF;
1522                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1523                         new_adv |= ADVERTISE_100FULL;
1524                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1525
1526                 if (tp->link_config.advertising &
1527                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1528                         new_adv = 0;
1529                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1530                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1531                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1532                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1533                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1534                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1535                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1536                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1537                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1538                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1539                 } else {
1540                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1541                 }
1542         } else {
1543                 /* Asking for a specific link mode. */
1544                 if (tp->link_config.speed == SPEED_1000) {
1545                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1546                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1547
1548                         if (tp->link_config.duplex == DUPLEX_FULL)
1549                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1550                         else
1551                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1552                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1553                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1554                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1555                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1556                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1557                 } else {
1558                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1559
1560                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1561                         if (tp->link_config.speed == SPEED_100) {
1562                                 if (tp->link_config.duplex == DUPLEX_FULL)
1563                                         new_adv |= ADVERTISE_100FULL;
1564                                 else
1565                                         new_adv |= ADVERTISE_100HALF;
1566                         } else {
1567                                 if (tp->link_config.duplex == DUPLEX_FULL)
1568                                         new_adv |= ADVERTISE_10FULL;
1569                                 else
1570                                         new_adv |= ADVERTISE_10HALF;
1571                         }
1572                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1573                 }
1574         }
1575
1576         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1577             tp->link_config.speed != SPEED_INVALID) {
1578                 u32 bmcr, orig_bmcr;
1579
1580                 tp->link_config.active_speed = tp->link_config.speed;
1581                 tp->link_config.active_duplex = tp->link_config.duplex;
1582
1583                 bmcr = 0;
1584                 switch (tp->link_config.speed) {
1585                 default:
1586                 case SPEED_10:
1587                         break;
1588
1589                 case SPEED_100:
1590                         bmcr |= BMCR_SPEED100;
1591                         break;
1592
1593                 case SPEED_1000:
1594                         bmcr |= TG3_BMCR_SPEED1000;
1595                         break;
1596                 }
1597
1598                 if (tp->link_config.duplex == DUPLEX_FULL)
1599                         bmcr |= BMCR_FULLDPLX;
1600
1601                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1602                     (bmcr != orig_bmcr)) {
1603                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1604                         for (i = 0; i < 1500; i++) {
1605                                 u32 tmp;
1606
1607                                 udelay(10);
1608                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1609                                     tg3_readphy(tp, MII_BMSR, &tmp))
1610                                         continue;
1611                                 if (!(tmp & BMSR_LSTATUS)) {
1612                                         udelay(40);
1613                                         break;
1614                                 }
1615                         }
1616                         tg3_writephy(tp, MII_BMCR, bmcr);
1617                         udelay(40);
1618                 }
1619         } else {
1620                 tg3_writephy(tp, MII_BMCR,
1621                              BMCR_ANENABLE | BMCR_ANRESTART);
1622         }
1623 }
1624
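     /* Program the BCM5401 PHY DSP with a fixed sequence of
      * address/value pairs; returns nonzero if any MDIO write failed.
      */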
1625 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1626 {
1627         int err;
1628
1629         /* Turn off tap power management and
1630          * set the extended packet length bit. */
1631         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1632
1633         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1634         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1635
1636         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1637         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1638
1639         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1640         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1641
1642         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1643         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1644
1645         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1646         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1647
1648         udelay(40);
1649
1650         return err;
1651 }
1652
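     /* Return 1 only if the PHY is currently advertising every 10/100
      * mode, plus both 1000BASE-T modes unless the device is
      * 10/100-only.
      */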
1653 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1654 {
1655         u32 adv_reg, all_mask;
1656
1657         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1658                 return 0;
1659
1660         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1661                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1662         if ((adv_reg & all_mask) != all_mask)
1663                 return 0;
1664         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1665                 u32 tg3_ctrl;
1666
1667                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1668                         return 0;
1669
1670                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1671                             MII_TG3_CTRL_ADV_1000_FULL);
1672                 if ((tg3_ctrl & all_mask) != all_mask)
1673                         return 0;
1674         }
1675         return 1;
1676 }
1677
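     /* Bring up the link on a copper PHY: clear pending MAC events and
      * PHY interrupts, optionally reset the PHY, apply chip-specific
      * workarounds, poll BMSR/AUX_STAT for the negotiated speed and
      * duplex, resolve flow control, restart autonegotiation if needed,
      * and finally update MAC_MODE and the netif carrier state.
      */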
1678 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1679 {
1680         int current_link_up;
1681         u32 bmsr, dummy;
1682         u16 current_speed;
1683         u8 current_duplex;
1684         int i, err;
1685
1686         tw32(MAC_EVENT, 0);
1687
1688         tw32_f(MAC_STATUS,
1689              (MAC_STATUS_SYNC_CHANGED |
1690               MAC_STATUS_CFG_CHANGED |
1691               MAC_STATUS_MI_COMPLETION |
1692               MAC_STATUS_LNKSTATE_CHANGED));
1693         udelay(40);
1694
1695         tp->mi_mode = MAC_MI_MODE_BASE;
1696         tw32_f(MAC_MI_MODE, tp->mi_mode);
1697         udelay(80);
1698
1699         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1700
1701         /* Some third-party PHYs need to be reset on link going
1702          * down.
1703          */
1704         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1705              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1706              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1707             netif_carrier_ok(tp->dev)) {
1708                 tg3_readphy(tp, MII_BMSR, &bmsr);
1709                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1710                     !(bmsr & BMSR_LSTATUS))
1711                         force_reset = 1;
1712         }
1713         if (force_reset)
1714                 tg3_phy_reset(tp);
1715
1716         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1717                 tg3_readphy(tp, MII_BMSR, &bmsr);
1718                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1719                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1720                         bmsr = 0;
1721
1722                 if (!(bmsr & BMSR_LSTATUS)) {
1723                         err = tg3_init_5401phy_dsp(tp);
1724                         if (err)
1725                                 return err;
1726
1727                         tg3_readphy(tp, MII_BMSR, &bmsr);
1728                         for (i = 0; i < 1000; i++) {
1729                                 udelay(10);
1730                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1731                                     (bmsr & BMSR_LSTATUS)) {
1732                                         udelay(40);
1733                                         break;
1734                                 }
1735                         }
1736
1737                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1738                             !(bmsr & BMSR_LSTATUS) &&
1739                             tp->link_config.active_speed == SPEED_1000) {
1740                                 err = tg3_phy_reset(tp);
1741                                 if (!err)
1742                                         err = tg3_init_5401phy_dsp(tp);
1743                                 if (err)
1744                                         return err;
1745                         }
1746                 }
1747         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1748                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1749                 /* 5701 {A0,B0} CRC bug workaround */
1750                 tg3_writephy(tp, 0x15, 0x0a75);
1751                 tg3_writephy(tp, 0x1c, 0x8c68);
1752                 tg3_writephy(tp, 0x1c, 0x8d68);
1753                 tg3_writephy(tp, 0x1c, 0x8c68);
1754         }
1755
1756         /* Clear pending interrupts... */
1757         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1758         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1759
1760         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1761                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1762         else
1763                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1764
1765         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1766             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1767                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1768                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1769                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1770                 else
1771                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1772         }
1773
1774         current_link_up = 0;
1775         current_speed = SPEED_INVALID;
1776         current_duplex = DUPLEX_INVALID;
1777
1778         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1779                 u32 val;
1780
1781                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1782                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1783                 if (!(val & (1 << 10))) {
1784                         val |= (1 << 10);
1785                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1786                         goto relink;
1787                 }
1788         }
1789
1790         bmsr = 0;
1791         for (i = 0; i < 100; i++) {
1792                 tg3_readphy(tp, MII_BMSR, &bmsr);
1793                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1794                     (bmsr & BMSR_LSTATUS))
1795                         break;
1796                 udelay(40);
1797         }
1798
1799         if (bmsr & BMSR_LSTATUS) {
1800                 u32 aux_stat, bmcr;
1801
1802                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1803                 for (i = 0; i < 2000; i++) {
1804                         udelay(10);
1805                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1806                             aux_stat)
1807                                 break;
1808                 }
1809
1810                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1811                                              &current_speed,
1812                                              &current_duplex);
1813
1814                 bmcr = 0;
1815                 for (i = 0; i < 200; i++) {
1816                         tg3_readphy(tp, MII_BMCR, &bmcr);
1817                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1818                                 continue;
1819                         if (bmcr && bmcr != 0x7fff)
1820                                 break;
1821                         udelay(10);
1822                 }
1823
1824                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1825                         if (bmcr & BMCR_ANENABLE) {
1826                                 current_link_up = 1;
1827
1828                                 /* Force autoneg restart if we are exiting
1829                                  * low power mode.
1830                                  */
1831                                 if (!tg3_copper_is_advertising_all(tp))
1832                                         current_link_up = 0;
1833                         } else {
1834                                 current_link_up = 0;
1835                         }
1836                 } else {
1837                         if (!(bmcr & BMCR_ANENABLE) &&
1838                             tp->link_config.speed == current_speed &&
1839                             tp->link_config.duplex == current_duplex) {
1840                                 current_link_up = 1;
1841                         } else {
1842                                 current_link_up = 0;
1843                         }
1844                 }
1845
1846                 tp->link_config.active_speed = current_speed;
1847                 tp->link_config.active_duplex = current_duplex;
1848         }
1849
1850         if (current_link_up == 1 &&
1851             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1852             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1853                 u32 local_adv, remote_adv;
1854
1855                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1856                         local_adv = 0;
1857                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1858
1859                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1860                         remote_adv = 0;
1861
1862                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1863
1864                 /* If we are not advertising full pause capability,
1865                  * something is wrong.  Bring the link down and reconfigure.
1866                  */
1867                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1868                         current_link_up = 0;
1869                 } else {
1870                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1871                 }
1872         }
1873 relink:
1874         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1875                 u32 tmp;
1876
1877                 tg3_phy_copper_begin(tp);
1878
1879                 tg3_readphy(tp, MII_BMSR, &tmp);
1880                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1881                     (tmp & BMSR_LSTATUS))
1882                         current_link_up = 1;
1883         }
1884
1885         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1886         if (current_link_up == 1) {
1887                 if (tp->link_config.active_speed == SPEED_100 ||
1888                     tp->link_config.active_speed == SPEED_10)
1889                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1890                 else
1891                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1892         } else
1893                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894
1895         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1896         if (tp->link_config.active_duplex == DUPLEX_HALF)
1897                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1898
1899         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1901                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1902                     (current_link_up == 1 &&
1903                      tp->link_config.active_speed == SPEED_10))
1904                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1905         } else {
1906                 if (current_link_up == 1)
1907                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1908         }
1909
1910         /* ??? Without this setting Netgear GA302T PHY does not
1911          * ??? send/receive packets...
1912          */
1913         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1914             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1915                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1916                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1917                 udelay(80);
1918         }
1919
1920         tw32_f(MAC_MODE, tp->mac_mode);
1921         udelay(40);
1922
1923         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1924                 /* Polled via timer. */
1925                 tw32_f(MAC_EVENT, 0);
1926         } else {
1927                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1928         }
1929         udelay(40);
1930
1931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1932             current_link_up == 1 &&
1933             tp->link_config.active_speed == SPEED_1000 &&
1934             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1935              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1936                 udelay(120);
1937                 tw32_f(MAC_STATUS,
1938                      (MAC_STATUS_SYNC_CHANGED |
1939                       MAC_STATUS_CFG_CHANGED));
1940                 udelay(40);
1941                 tg3_write_mem(tp,
1942                               NIC_SRAM_FIRMWARE_MBOX,
1943                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1944         }
1945
1946         if (current_link_up != netif_carrier_ok(tp->dev)) {
1947                 if (current_link_up)
1948                         netif_carrier_on(tp->dev);
1949                 else
1950                         netif_carrier_off(tp->dev);
1951                 tg3_link_report(tp);
1952         }
1953
1954         return 0;
1955 }
1956
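     /* State tracked by the software fiber autonegotiation state
      * machine below.
      */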
1957 struct tg3_fiber_aneginfo {
1958         int state;
1959 #define ANEG_STATE_UNKNOWN              0
1960 #define ANEG_STATE_AN_ENABLE            1
1961 #define ANEG_STATE_RESTART_INIT         2
1962 #define ANEG_STATE_RESTART              3
1963 #define ANEG_STATE_DISABLE_LINK_OK      4
1964 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1965 #define ANEG_STATE_ABILITY_DETECT       6
1966 #define ANEG_STATE_ACK_DETECT_INIT      7
1967 #define ANEG_STATE_ACK_DETECT           8
1968 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1969 #define ANEG_STATE_COMPLETE_ACK         10
1970 #define ANEG_STATE_IDLE_DETECT_INIT     11
1971 #define ANEG_STATE_IDLE_DETECT          12
1972 #define ANEG_STATE_LINK_OK              13
1973 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1974 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1975
1976         u32 flags;
1977 #define MR_AN_ENABLE            0x00000001
1978 #define MR_RESTART_AN           0x00000002
1979 #define MR_AN_COMPLETE          0x00000004
1980 #define MR_PAGE_RX              0x00000008
1981 #define MR_NP_LOADED            0x00000010
1982 #define MR_TOGGLE_TX            0x00000020
1983 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1984 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1985 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1986 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1987 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1988 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1989 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1990 #define MR_TOGGLE_RX            0x00002000
1991 #define MR_NP_RX                0x00004000
1992
1993 #define MR_LINK_OK              0x80000000
1994
1995         unsigned long link_time, cur_time;
1996
1997         u32 ability_match_cfg;
1998         int ability_match_count;
1999
2000         char ability_match, idle_match, ack_match;
2001
2002         u32 txconfig, rxconfig;
2003 #define ANEG_CFG_NP             0x00000080
2004 #define ANEG_CFG_ACK            0x00000040
2005 #define ANEG_CFG_RF2            0x00000020
2006 #define ANEG_CFG_RF1            0x00000010
2007 #define ANEG_CFG_PS2            0x00000001
2008 #define ANEG_CFG_PS1            0x00008000
2009 #define ANEG_CFG_HD             0x00004000
2010 #define ANEG_CFG_FD             0x00002000
2011 #define ANEG_CFG_INVAL          0x00001f06
2012
2013 };
2014 #define ANEG_OK         0
2015 #define ANEG_DONE       1
2016 #define ANEG_TIMER_ENAB 2
2017 #define ANEG_FAILED     -1
2018
2019 #define ANEG_STATE_SETTLE_TIME  10000
2020
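     /* Run one step of the software 1000BASE-X autonegotiation state
      * machine: sample the received config word from MAC_RX_AUTO_NEG,
      * update the ability/ack/idle match tracking, and advance
      * ap->state.  Returns ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or
      * ANEG_FAILED.
      */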
2021 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2022                                    struct tg3_fiber_aneginfo *ap)
2023 {
2024         unsigned long delta;
2025         u32 rx_cfg_reg;
2026         int ret;
2027
2028         if (ap->state == ANEG_STATE_UNKNOWN) {
2029                 ap->rxconfig = 0;
2030                 ap->link_time = 0;
2031                 ap->cur_time = 0;
2032                 ap->ability_match_cfg = 0;
2033                 ap->ability_match_count = 0;
2034                 ap->ability_match = 0;
2035                 ap->idle_match = 0;
2036                 ap->ack_match = 0;
2037         }
2038         ap->cur_time++;
2039
2040         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2041                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2042
2043                 if (rx_cfg_reg != ap->ability_match_cfg) {
2044                         ap->ability_match_cfg = rx_cfg_reg;
2045                         ap->ability_match = 0;
2046                         ap->ability_match_count = 0;
2047                 } else {
2048                         if (++ap->ability_match_count > 1) {
2049                                 ap->ability_match = 1;
2050                                 ap->ability_match_cfg = rx_cfg_reg;
2051                         }
2052                 }
2053                 if (rx_cfg_reg & ANEG_CFG_ACK)
2054                         ap->ack_match = 1;
2055                 else
2056                         ap->ack_match = 0;
2057
2058                 ap->idle_match = 0;
2059         } else {
2060                 ap->idle_match = 1;
2061                 ap->ability_match_cfg = 0;
2062                 ap->ability_match_count = 0;
2063                 ap->ability_match = 0;
2064                 ap->ack_match = 0;
2065
2066                 rx_cfg_reg = 0;
2067         }
2068
2069         ap->rxconfig = rx_cfg_reg;
2070         ret = ANEG_OK;
2071
2072         switch(ap->state) {
2073         case ANEG_STATE_UNKNOWN:
2074                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2075                         ap->state = ANEG_STATE_AN_ENABLE;
2076
2077                 /* fallthru */
2078         case ANEG_STATE_AN_ENABLE:
2079                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2080                 if (ap->flags & MR_AN_ENABLE) {
2081                         ap->link_time = 0;
2082                         ap->cur_time = 0;
2083                         ap->ability_match_cfg = 0;
2084                         ap->ability_match_count = 0;
2085                         ap->ability_match = 0;
2086                         ap->idle_match = 0;
2087                         ap->ack_match = 0;
2088
2089                         ap->state = ANEG_STATE_RESTART_INIT;
2090                 } else {
2091                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2092                 }
2093                 break;
2094
2095         case ANEG_STATE_RESTART_INIT:
2096                 ap->link_time = ap->cur_time;
2097                 ap->flags &= ~(MR_NP_LOADED);
2098                 ap->txconfig = 0;
2099                 tw32(MAC_TX_AUTO_NEG, 0);
2100                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2101                 tw32_f(MAC_MODE, tp->mac_mode);
2102                 udelay(40);
2103
2104                 ret = ANEG_TIMER_ENAB;
2105                 ap->state = ANEG_STATE_RESTART;
2106
2107                 /* fallthru */
2108         case ANEG_STATE_RESTART:
2109                 delta = ap->cur_time - ap->link_time;
2110                 if (delta > ANEG_STATE_SETTLE_TIME) {
2111                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2112                 } else {
2113                         ret = ANEG_TIMER_ENAB;
2114                 }
2115                 break;
2116
2117         case ANEG_STATE_DISABLE_LINK_OK:
2118                 ret = ANEG_DONE;
2119                 break;
2120
2121         case ANEG_STATE_ABILITY_DETECT_INIT:
2122                 ap->flags &= ~(MR_TOGGLE_TX);
2123                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2124                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2125                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2126                 tw32_f(MAC_MODE, tp->mac_mode);
2127                 udelay(40);
2128
2129                 ap->state = ANEG_STATE_ABILITY_DETECT;
2130                 break;
2131
2132         case ANEG_STATE_ABILITY_DETECT:
2133                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2134                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2135                 }
2136                 break;
2137
2138         case ANEG_STATE_ACK_DETECT_INIT:
2139                 ap->txconfig |= ANEG_CFG_ACK;
2140                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2141                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2142                 tw32_f(MAC_MODE, tp->mac_mode);
2143                 udelay(40);
2144
2145                 ap->state = ANEG_STATE_ACK_DETECT;
2146
2147                 /* fallthru */
2148         case ANEG_STATE_ACK_DETECT:
2149                 if (ap->ack_match != 0) {
2150                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2151                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2152                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2153                         } else {
2154                                 ap->state = ANEG_STATE_AN_ENABLE;
2155                         }
2156                 } else if (ap->ability_match != 0 &&
2157                            ap->rxconfig == 0) {
2158                         ap->state = ANEG_STATE_AN_ENABLE;
2159                 }
2160                 break;
2161
2162         case ANEG_STATE_COMPLETE_ACK_INIT:
2163                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2164                         ret = ANEG_FAILED;
2165                         break;
2166                 }
2167                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2168                                MR_LP_ADV_HALF_DUPLEX |
2169                                MR_LP_ADV_SYM_PAUSE |
2170                                MR_LP_ADV_ASYM_PAUSE |
2171                                MR_LP_ADV_REMOTE_FAULT1 |
2172                                MR_LP_ADV_REMOTE_FAULT2 |
2173                                MR_LP_ADV_NEXT_PAGE |
2174                                MR_TOGGLE_RX |
2175                                MR_NP_RX);
2176                 if (ap->rxconfig & ANEG_CFG_FD)
2177                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2178                 if (ap->rxconfig & ANEG_CFG_HD)
2179                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2180                 if (ap->rxconfig & ANEG_CFG_PS1)
2181                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2182                 if (ap->rxconfig & ANEG_CFG_PS2)
2183                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2184                 if (ap->rxconfig & ANEG_CFG_RF1)
2185                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2186                 if (ap->rxconfig & ANEG_CFG_RF2)
2187                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2188                 if (ap->rxconfig & ANEG_CFG_NP)
2189                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2190
2191                 ap->link_time = ap->cur_time;
2192
2193                 ap->flags ^= (MR_TOGGLE_TX);
2194                 if (ap->rxconfig & 0x0008)
2195                         ap->flags |= MR_TOGGLE_RX;
2196                 if (ap->rxconfig & ANEG_CFG_NP)
2197                         ap->flags |= MR_NP_RX;
2198                 ap->flags |= MR_PAGE_RX;
2199
2200                 ap->state = ANEG_STATE_COMPLETE_ACK;
2201                 ret = ANEG_TIMER_ENAB;
2202                 break;
2203
2204         case ANEG_STATE_COMPLETE_ACK:
2205                 if (ap->ability_match != 0 &&
2206                     ap->rxconfig == 0) {
2207                         ap->state = ANEG_STATE_AN_ENABLE;
2208                         break;
2209                 }
2210                 delta = ap->cur_time - ap->link_time;
2211                 if (delta > ANEG_STATE_SETTLE_TIME) {
2212                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2213                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2214                         } else {
2215                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2216                                     !(ap->flags & MR_NP_RX)) {
2217                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2218                                 } else {
2219                                         ret = ANEG_FAILED;
2220                                 }
2221                         }
2222                 }
2223                 break;
2224
2225         case ANEG_STATE_IDLE_DETECT_INIT:
2226                 ap->link_time = ap->cur_time;
2227                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2228                 tw32_f(MAC_MODE, tp->mac_mode);
2229                 udelay(40);
2230
2231                 ap->state = ANEG_STATE_IDLE_DETECT;
2232                 ret = ANEG_TIMER_ENAB;
2233                 break;
2234
2235         case ANEG_STATE_IDLE_DETECT:
2236                 if (ap->ability_match != 0 &&
2237                     ap->rxconfig == 0) {
2238                         ap->state = ANEG_STATE_AN_ENABLE;
2239                         break;
2240                 }
2241                 delta = ap->cur_time - ap->link_time;
2242                 if (delta > ANEG_STATE_SETTLE_TIME) {
2243                         /* XXX another gem from the Broadcom driver :( */
2244                         ap->state = ANEG_STATE_LINK_OK;
2245                 }
2246                 break;
2247
2248         case ANEG_STATE_LINK_OK:
2249                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2250                 ret = ANEG_DONE;
2251                 break;
2252
2253         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2254                 /* ??? unimplemented */
2255                 break;
2256
2257         case ANEG_STATE_NEXT_PAGE_WAIT:
2258                 /* ??? unimplemented */
2259                 break;
2260
2261         default:
2262                 ret = ANEG_FAILED;
2263                 break;
2264         }
2265
2266         return ret;
2267 }
2268
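     /* Drive the software autoneg state machine to completion (up to
      * roughly 195000 one-microsecond ticks) and report the resolved
      * ability flags via *flags.  Returns nonzero if negotiation
      * finished with a usable link.
      */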
2269 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2270 {
2271         int res = 0;
2272         struct tg3_fiber_aneginfo aninfo;
2273         int status = ANEG_FAILED;
2274         unsigned int tick;
2275         u32 tmp;
2276
2277         tw32_f(MAC_TX_AUTO_NEG, 0);
2278
2279         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2280         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2281         udelay(40);
2282
2283         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2284         udelay(40);
2285
2286         memset(&aninfo, 0, sizeof(aninfo));
2287         aninfo.flags |= MR_AN_ENABLE;
2288         aninfo.state = ANEG_STATE_UNKNOWN;
2289         aninfo.cur_time = 0;
2290         tick = 0;
2291         while (++tick < 195000) {
2292                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2293                 if (status == ANEG_DONE || status == ANEG_FAILED)
2294                         break;
2295
2296                 udelay(1);
2297         }
2298
2299         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2300         tw32_f(MAC_MODE, tp->mac_mode);
2301         udelay(40);
2302
2303         *flags = aninfo.flags;
2304
2305         if (status == ANEG_DONE &&
2306             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2307                              MR_LP_ADV_FULL_DUPLEX)))
2308                 res = 1;
2309
2310         return res;
2311 }
2312
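     /* One-time init sequence for the BCM8002 fiber PHY: set the PLL
      * lock range, soft reset, configure the PMA channel registers and
      * toggle POR, then deselect the channel register.
      */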
2313 static void tg3_init_bcm8002(struct tg3 *tp)
2314 {
2315         u32 mac_status = tr32(MAC_STATUS);
2316         int i;
2317
2318         /* Reset when initializing the first time or when we have a link. */
2319         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2320             !(mac_status & MAC_STATUS_PCS_SYNCED))
2321                 return;
2322
2323         /* Set PLL lock range. */
2324         tg3_writephy(tp, 0x16, 0x8007);
2325
2326         /* SW reset */
2327         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2328
2329         /* Wait for reset to complete. */
2330         /* XXX schedule_timeout() ... */
2331         for (i = 0; i < 500; i++)
2332                 udelay(10);
2333
2334         /* Config mode; select PMA/Ch 1 regs. */
2335         tg3_writephy(tp, 0x10, 0x8411);
2336
2337         /* Enable auto-lock and comdet, select txclk for tx. */
2338         tg3_writephy(tp, 0x11, 0x0a10);
2339
2340         tg3_writephy(tp, 0x18, 0x00a0);
2341         tg3_writephy(tp, 0x16, 0x41ff);
2342
2343         /* Assert and deassert POR. */
2344         tg3_writephy(tp, 0x13, 0x0400);
2345         udelay(40);
2346         tg3_writephy(tp, 0x13, 0x0000);
2347
2348         tg3_writephy(tp, 0x11, 0x0a50);
2349         udelay(40);
2350         tg3_writephy(tp, 0x11, 0x0a10);
2351
2352         /* Wait for signal to stabilize */
2353         /* XXX schedule_timeout() ... */
2354         for (i = 0; i < 15000; i++)
2355                 udelay(10);
2356
2357         /* Deselect the channel register so we can read the PHYID
2358          * later.
2359          */
2360         tg3_writephy(tp, 0x10, 0x8011);
2361 }
2362
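     /* Fiber link bring-up using the on-chip SG_DIG autoneg block.
      * With autoneg disabled the link is simply forced; otherwise the
      * expected SG_DIG_CTRL value is programmed, the block is given
      * ~200ms to complete, flow control is resolved from the partner's
      * pause bits, and parallel detection is used as a fallback.
      * Returns 1 if the link is up.
      */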
2363 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2364 {
2365         u32 sg_dig_ctrl, sg_dig_status;
2366         u32 serdes_cfg, expected_sg_dig_ctrl;
2367         int workaround, port_a;
2368         int current_link_up;
2369
2370         serdes_cfg = 0;
2371         expected_sg_dig_ctrl = 0;
2372         workaround = 0;
2373         port_a = 1;
2374         current_link_up = 0;
2375
2376         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2377             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2378                 workaround = 1;
2379                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2380                         port_a = 0;
2381
2382                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2383                 /* preserve bits 20-23 for voltage regulator */
2384                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2385         }
2386
2387         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2388
2389         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2390                 if (sg_dig_ctrl & (1 << 31)) {
2391                         if (workaround) {
2392                                 u32 val = serdes_cfg;
2393
2394                                 if (port_a)
2395                                         val |= 0xc010000;
2396                                 else
2397                                         val |= 0x4010000;
2398                                 tw32_f(MAC_SERDES_CFG, val);
2399                         }
2400                         tw32_f(SG_DIG_CTRL, 0x01388400);
2401                 }
2402                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2403                         tg3_setup_flow_control(tp, 0, 0);
2404                         current_link_up = 1;
2405                 }
2406                 goto out;
2407         }
2408
2409         /* Want auto-negotiation.  */
2410         expected_sg_dig_ctrl = 0x81388400;
2411
2412         /* Pause capability */
2413         expected_sg_dig_ctrl |= (1 << 11);
2414
2415         /* Asymmetric pause */
2416         expected_sg_dig_ctrl |= (1 << 12);
2417
2418         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2419                 if (workaround)
2420                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2421                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2422                 udelay(5);
2423                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2424
2425                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2426         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2427                                  MAC_STATUS_SIGNAL_DET)) {
2428                 int i;
2429
2430                 /* Give the link time to negotiate (~200ms) */
2431                 for (i = 0; i < 40000; i++) {
2432                         sg_dig_status = tr32(SG_DIG_STATUS);
2433                         if (sg_dig_status & (0x3))
2434                                 break;
2435                         udelay(5);
2436                 }
2437                 mac_status = tr32(MAC_STATUS);
2438
2439                 if ((sg_dig_status & (1 << 1)) &&
2440                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2441                         u32 local_adv, remote_adv;
2442
2443                         local_adv = ADVERTISE_PAUSE_CAP;
2444                         remote_adv = 0;
2445                         if (sg_dig_status & (1 << 19))
2446                                 remote_adv |= LPA_PAUSE_CAP;
2447                         if (sg_dig_status & (1 << 20))
2448                                 remote_adv |= LPA_PAUSE_ASYM;
2449
2450                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2451                         current_link_up = 1;
2452                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2453                 } else if (!(sg_dig_status & (1 << 1))) {
2454                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2455                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2456                         else {
2457                                 if (workaround) {
2458                                         u32 val = serdes_cfg;
2459
2460                                         if (port_a)
2461                                                 val |= 0xc010000;
2462                                         else
2463                                                 val |= 0x4010000;
2464
2465                                         tw32_f(MAC_SERDES_CFG, val);
2466                                 }
2467
2468                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2469                                 udelay(40);
2470
2471                                 /* Link parallel detection: link is up only
2472                                  * if we have PCS_SYNC and are not receiving
2473                                  * config code words. */
2474                                 mac_status = tr32(MAC_STATUS);
2475                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2476                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2477                                         tg3_setup_flow_control(tp, 0, 0);
2478                                         current_link_up = 1;
2479                                 }
2480                         }
2481                 }
2482         }
2483
2484 out:
2485         return current_link_up;
2486 }
2487
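     /* Fiber link bring-up without the hardware autoneg block: run the
      * software state machine via fiber_autoneg() when autonegotiation
      * is enabled, otherwise force a 1000 Mbps full duplex link.
      * Returns 1 if the link is up.
      */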
2488 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2489 {
2490         int current_link_up = 0;
2491
2492         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2493                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2494                 goto out;
2495         }
2496
2497         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2498                 u32 flags;
2499                 int i;
2500
2501                 if (fiber_autoneg(tp, &flags)) {
2502                         u32 local_adv, remote_adv;
2503
2504                         local_adv = ADVERTISE_PAUSE_CAP;
2505                         remote_adv = 0;
2506                         if (flags & MR_LP_ADV_SYM_PAUSE)
2507                                 remote_adv |= LPA_PAUSE_CAP;
2508                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2509                                 remote_adv |= LPA_PAUSE_ASYM;
2510
2511                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2512
2513                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2514                         current_link_up = 1;
2515                 }
2516                 for (i = 0; i < 30; i++) {
2517                         udelay(20);
2518                         tw32_f(MAC_STATUS,
2519                                (MAC_STATUS_SYNC_CHANGED |
2520                                 MAC_STATUS_CFG_CHANGED));
2521                         udelay(40);
2522                         if ((tr32(MAC_STATUS) &
2523                              (MAC_STATUS_SYNC_CHANGED |
2524                               MAC_STATUS_CFG_CHANGED)) == 0)
2525                                 break;
2526                 }
2527
2528                 mac_status = tr32(MAC_STATUS);
2529                 if (current_link_up == 0 &&
2530                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2531                     !(mac_status & MAC_STATUS_RCVD_CFG))
2532                         current_link_up = 1;
2533         } else {
2534                 /* Forcing 1000FD link up. */
2535                 current_link_up = 1;
2536                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2537
2538                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2539                 udelay(40);
2540         }
2541
2542 out:
2543         return current_link_up;
2544 }
2545
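     /* Top-level link setup for TBI (fiber) devices: put the MAC into
      * TBI port mode, initialize a BCM8002 PHY if present, run either
      * hardware or by-hand autonegotiation, then update MAC_MODE, the
      * link LEDs and the netif carrier state, reporting any change.
      */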
2546 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2547 {
2548         u32 orig_pause_cfg;
2549         u16 orig_active_speed;
2550         u8 orig_active_duplex;
2551         u32 mac_status;
2552         int current_link_up;
2553         int i;
2554
2555         orig_pause_cfg =
2556                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2557                                   TG3_FLAG_TX_PAUSE));
2558         orig_active_speed = tp->link_config.active_speed;
2559         orig_active_duplex = tp->link_config.active_duplex;
2560
2561         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2562             netif_carrier_ok(tp->dev) &&
2563             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2564                 mac_status = tr32(MAC_STATUS);
2565                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2566                                MAC_STATUS_SIGNAL_DET |
2567                                MAC_STATUS_CFG_CHANGED |
2568                                MAC_STATUS_RCVD_CFG);
2569                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2570                                    MAC_STATUS_SIGNAL_DET)) {
2571                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2572                                             MAC_STATUS_CFG_CHANGED));
2573                         return 0;
2574                 }
2575         }
2576
2577         tw32_f(MAC_TX_AUTO_NEG, 0);
2578
2579         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2580         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2581         tw32_f(MAC_MODE, tp->mac_mode);
2582         udelay(40);
2583
2584         if (tp->phy_id == PHY_ID_BCM8002)
2585                 tg3_init_bcm8002(tp);
2586
2587         /* Enable link change event even when serdes polling.  */
2588         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2589         udelay(40);
2590
2591         current_link_up = 0;
2592         mac_status = tr32(MAC_STATUS);
2593
2594         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2595                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2596         else
2597                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2598
2599         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2600         tw32_f(MAC_MODE, tp->mac_mode);
2601         udelay(40);
2602
2603         tp->hw_status->status =
2604                 (SD_STATUS_UPDATED |
2605                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2606
2607         for (i = 0; i < 100; i++) {
2608                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2609                                     MAC_STATUS_CFG_CHANGED));
2610                 udelay(5);
2611                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2612                                          MAC_STATUS_CFG_CHANGED)) == 0)
2613                         break;
2614         }
2615
2616         mac_status = tr32(MAC_STATUS);
2617         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2618                 current_link_up = 0;
2619                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2620                         tw32_f(MAC_MODE, (tp->mac_mode |
2621                                           MAC_MODE_SEND_CONFIGS));
2622                         udelay(1);
2623                         tw32_f(MAC_MODE, tp->mac_mode);
2624                 }
2625         }
2626
2627         if (current_link_up == 1) {
2628                 tp->link_config.active_speed = SPEED_1000;
2629                 tp->link_config.active_duplex = DUPLEX_FULL;
2630                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2631                                     LED_CTRL_LNKLED_OVERRIDE |
2632                                     LED_CTRL_1000MBPS_ON));
2633         } else {
2634                 tp->link_config.active_speed = SPEED_INVALID;
2635                 tp->link_config.active_duplex = DUPLEX_INVALID;
2636                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2637                                     LED_CTRL_LNKLED_OVERRIDE |
2638                                     LED_CTRL_TRAFFIC_OVERRIDE));
2639         }
2640
2641         if (current_link_up != netif_carrier_ok(tp->dev)) {
2642                 if (current_link_up)
2643                         netif_carrier_on(tp->dev);
2644                 else
2645                         netif_carrier_off(tp->dev);
2646                 tg3_link_report(tp);
2647         } else {
2648                 u32 now_pause_cfg =
2649                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2650                                          TG3_FLAG_TX_PAUSE);
2651                 if (orig_pause_cfg != now_pause_cfg ||
2652                     orig_active_speed != tp->link_config.active_speed ||
2653                     orig_active_duplex != tp->link_config.active_duplex)
2654                         tg3_link_report(tp);
2655         }
2656
2657         return 0;
2658 }
2659
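     /* Link setup for SerDes devices whose PHY is reached over MII:
      * read the latched BMSR, program 1000BASE-X advertisements (or
      * force duplex when autoneg is off), resolve flow control from
      * the local and link partner abilities, and update MAC_MODE.
      */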
2660 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2661 {
2662         int current_link_up, err = 0;
2663         u32 bmsr, bmcr;
2664         u16 current_speed;
2665         u8 current_duplex;
2666
2667         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2668         tw32_f(MAC_MODE, tp->mac_mode);
2669         udelay(40);
2670
2671         tw32(MAC_EVENT, 0);
2672
2673         tw32_f(MAC_STATUS,
2674              (MAC_STATUS_SYNC_CHANGED |
2675               MAC_STATUS_CFG_CHANGED |
2676               MAC_STATUS_MI_COMPLETION |
2677               MAC_STATUS_LNKSTATE_CHANGED));
2678         udelay(40);
2679
2680         if (force_reset)
2681                 tg3_phy_reset(tp);
2682
2683         current_link_up = 0;
2684         current_speed = SPEED_INVALID;
2685         current_duplex = DUPLEX_INVALID;
2686
2687         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2688         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2690                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2691                         bmsr |= BMSR_LSTATUS;
2692                 else
2693                         bmsr &= ~BMSR_LSTATUS;
2694         }
2695
2696         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2697
2698         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2699             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2700                 /* do nothing, just check for link up at the end */
2701         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2702                 u32 adv, new_adv;
2703
2704                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2705                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2706                                   ADVERTISE_1000XPAUSE |
2707                                   ADVERTISE_1000XPSE_ASYM |
2708                                   ADVERTISE_SLCT);
2709
2710                 /* Always advertise symmetric PAUSE just like copper */
2711                 new_adv |= ADVERTISE_1000XPAUSE;
2712
2713                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2714                         new_adv |= ADVERTISE_1000XHALF;
2715                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2716                         new_adv |= ADVERTISE_1000XFULL;
2717
2718                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2719                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2720                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2721                         tg3_writephy(tp, MII_BMCR, bmcr);
2722
2723                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2724                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2725                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2726
2727                         return err;
2728                 }
2729         } else {
2730                 u32 new_bmcr;
2731
2732                 bmcr &= ~BMCR_SPEED1000;
2733                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2734
2735                 if (tp->link_config.duplex == DUPLEX_FULL)
2736                         new_bmcr |= BMCR_FULLDPLX;
2737
2738                 if (new_bmcr != bmcr) {
2739                         /* BMCR_SPEED1000 is a reserved bit that needs
2740                          * to be set on write.
2741                          */
2742                         new_bmcr |= BMCR_SPEED1000;
2743
2744                         /* Force a linkdown */
2745                         if (netif_carrier_ok(tp->dev)) {
2746                                 u32 adv;
2747
2748                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2749                                 adv &= ~(ADVERTISE_1000XFULL |
2750                                          ADVERTISE_1000XHALF |
2751                                          ADVERTISE_SLCT);
2752                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2753                                 tg3_writephy(tp, MII_BMCR, bmcr |
2754                                                            BMCR_ANRESTART |
2755                                                            BMCR_ANENABLE);
2756                                 udelay(10);
2757                                 netif_carrier_off(tp->dev);
2758                         }
2759                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2760                         bmcr = new_bmcr;
2761                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2762                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2763                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2764                             ASIC_REV_5714) {
2765                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2766                                         bmsr |= BMSR_LSTATUS;
2767                                 else
2768                                         bmsr &= ~BMSR_LSTATUS;
2769                         }
2770                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2771                 }
2772         }
2773
2774         if (bmsr & BMSR_LSTATUS) {
2775                 current_speed = SPEED_1000;
2776                 current_link_up = 1;
2777                 if (bmcr & BMCR_FULLDPLX)
2778                         current_duplex = DUPLEX_FULL;
2779                 else
2780                         current_duplex = DUPLEX_HALF;
2781
2782                 if (bmcr & BMCR_ANENABLE) {
2783                         u32 local_adv, remote_adv, common;
2784
2785                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2786                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2787                         common = local_adv & remote_adv;
2788                         if (common & (ADVERTISE_1000XHALF |
2789                                       ADVERTISE_1000XFULL)) {
2790                                 if (common & ADVERTISE_1000XFULL)
2791                                         current_duplex = DUPLEX_FULL;
2792                                 else
2793                                         current_duplex = DUPLEX_HALF;
2794
2795                                 tg3_setup_flow_control(tp, local_adv,
2796                                                        remote_adv);
2797                         }
2798                         else
2799                                 current_link_up = 0;
2800                 }
2801         }
2802
2803         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2804         if (tp->link_config.active_duplex == DUPLEX_HALF)
2805                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2806
2807         tw32_f(MAC_MODE, tp->mac_mode);
2808         udelay(40);
2809
2810         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2811
2812         tp->link_config.active_speed = current_speed;
2813         tp->link_config.active_duplex = current_duplex;
2814
2815         if (current_link_up != netif_carrier_ok(tp->dev)) {
2816                 if (current_link_up)
2817                         netif_carrier_on(tp->dev);
2818                 else {
2819                         netif_carrier_off(tp->dev);
2820                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2821                 }
2822                 tg3_link_report(tp);
2823         }
2824         return err;
2825 }
2826
2827 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2828 {
2829         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2830                 /* Give autoneg time to complete. */
2831                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2832                 return;
2833         }
2834         if (!netif_carrier_ok(tp->dev) &&
2835             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2836                 u32 bmcr;
2837
2838                 tg3_readphy(tp, MII_BMCR, &bmcr);
2839                 if (bmcr & BMCR_ANENABLE) {
2840                         u32 phy1, phy2;
2841
2842                         /* Select shadow register 0x1f */
2843                         tg3_writephy(tp, 0x1c, 0x7c00);
2844                         tg3_readphy(tp, 0x1c, &phy1);
2845
2846                         /* Select expansion interrupt status register */
2847                         tg3_writephy(tp, 0x17, 0x0f01);
2848                         tg3_readphy(tp, 0x15, &phy2);
2849                         tg3_readphy(tp, 0x15, &phy2);
2850
2851                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2852                                 /* We have signal detect and not receiving
2853                                  * config code words, link is up by parallel
2854                                  * detection.
2855                                  */
2856
2857                                 bmcr &= ~BMCR_ANENABLE;
2858                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2859                                 tg3_writephy(tp, MII_BMCR, bmcr);
2860                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2861                         }
2862                 }
2863         }
2864         else if (netif_carrier_ok(tp->dev) &&
2865                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2866                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2867                 u32 phy2;
2868
2869                 /* Select expansion interrupt status register */
2870                 tg3_writephy(tp, 0x17, 0x0f01);
2871                 tg3_readphy(tp, 0x15, &phy2);
2872                 if (phy2 & 0x20) {
2873                         u32 bmcr;
2874
2875                         /* Config code words received, turn on autoneg. */
2876                         tg3_readphy(tp, MII_BMCR, &bmcr);
2877                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2878
2879                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2880
2881                 }
2882         }
2883 }
2884
2885 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2886 {
2887         int err;
2888
2889         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2890                 err = tg3_setup_fiber_phy(tp, force_reset);
2891         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2892                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2893         } else {
2894                 err = tg3_setup_copper_phy(tp, force_reset);
2895         }
2896
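        /* 1000/half needs a longer slot time (0xff vs. the default 32),
         * presumably to cover carrier extension; every other speed/duplex
         * combination keeps the standard IPG and slot-time values.
         */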
2897         if (tp->link_config.active_speed == SPEED_1000 &&
2898             tp->link_config.active_duplex == DUPLEX_HALF)
2899                 tw32(MAC_TX_LENGTHS,
2900                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2901                       (6 << TX_LENGTHS_IPG_SHIFT) |
2902                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2903         else
2904                 tw32(MAC_TX_LENGTHS,
2905                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2906                       (6 << TX_LENGTHS_IPG_SHIFT) |
2907                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2908
2909         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2910                 if (netif_carrier_ok(tp->dev)) {
2911                         tw32(HOSTCC_STAT_COAL_TICKS,
2912                              tp->coal.stats_block_coalesce_usecs);
2913                 } else {
2914                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2915                 }
2916         }
2917
2918         return err;
2919 }
2920
2921 /* Tigon3 never reports partial packet sends.  So we do not
2922  * need special logic to handle SKBs that have not had all
2923  * of their frags sent yet, like SunGEM does.
2924  */
2925 static void tg3_tx(struct tg3 *tp)
2926 {
2927         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2928         u32 sw_idx = tp->tx_cons;
2929
2930         while (sw_idx != hw_idx) {
2931                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2932                 struct sk_buff *skb = ri->skb;
2933                 int i;
2934
2935                 if (unlikely(skb == NULL))
2936                         BUG();
2937
2938                 pci_unmap_single(tp->pdev,
2939                                  pci_unmap_addr(ri, mapping),
2940                                  skb_headlen(skb),
2941                                  PCI_DMA_TODEVICE);
2942
2943                 ri->skb = NULL;
2944
2945                 sw_idx = NEXT_TX(sw_idx);
2946
2947                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2948                         if (unlikely(sw_idx == hw_idx))
2949                                 BUG();
2950
2951                         ri = &tp->tx_buffers[sw_idx];
2952                         if (unlikely(ri->skb != NULL))
2953                                 BUG();
2954
2955                         pci_unmap_page(tp->pdev,
2956                                        pci_unmap_addr(ri, mapping),
2957                                        skb_shinfo(skb)->frags[i].size,
2958                                        PCI_DMA_TODEVICE);
2959
2960                         sw_idx = NEXT_TX(sw_idx);
2961                 }
2962
2963                 dev_kfree_skb(skb);
2964         }
2965
2966         tp->tx_cons = sw_idx;
2967
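        /* The queue-stopped test is repeated under tx_lock so we do not
         * race with tg3_start_xmit(), which stops the queue while holding
         * that lock.
         */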
2968         if (unlikely(netif_queue_stopped(tp->dev))) {
2969                 spin_lock(&tp->tx_lock);
2970                 if (netif_queue_stopped(tp->dev) &&
2971                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2972                         netif_wake_queue(tp->dev);
2973                 spin_unlock(&tp->tx_lock);
2974         }
2975 }
2976
2977 /* Returns size of skb allocated or < 0 on error.
2978  *
2979  * We only need to fill in the address because the other members
2980  * of the RX descriptor are invariant, see tg3_init_rings.
2981  *
2982  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2983  * posting buffers we only dirty the first cache line of the RX
2984  * descriptor (containing the address).  Whereas for the RX status
2985  * buffers the cpu only reads the last cacheline of the RX descriptor
2986  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2987  */
2988 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2989                             int src_idx, u32 dest_idx_unmasked)
2990 {
2991         struct tg3_rx_buffer_desc *desc;
2992         struct ring_info *map, *src_map;
2993         struct sk_buff *skb;
2994         dma_addr_t mapping;
2995         int skb_size, dest_idx;
2996
2997         src_map = NULL;
2998         switch (opaque_key) {
2999         case RXD_OPAQUE_RING_STD:
3000                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3001                 desc = &tp->rx_std[dest_idx];
3002                 map = &tp->rx_std_buffers[dest_idx];
3003                 if (src_idx >= 0)
3004                         src_map = &tp->rx_std_buffers[src_idx];
3005                 skb_size = tp->rx_pkt_buf_sz;
3006                 break;
3007
3008         case RXD_OPAQUE_RING_JUMBO:
3009                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3010                 desc = &tp->rx_jumbo[dest_idx];
3011                 map = &tp->rx_jumbo_buffers[dest_idx];
3012                 if (src_idx >= 0)
3013                         src_map = &tp->rx_jumbo_buffers[src_idx];
3014                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3015                 break;
3016
3017         default:
3018                 return -EINVAL;
3019         };
3020
3021         /* Do not overwrite any of the map or rp information
3022          * until we are sure we can commit to a new buffer.
3023          *
3024          * Callers depend upon this behavior and assume that
3025          * we leave everything unchanged if we fail.
3026          */
3027         skb = dev_alloc_skb(skb_size);
3028         if (skb == NULL)
3029                 return -ENOMEM;
3030
3031         skb->dev = tp->dev;
3032         skb_reserve(skb, tp->rx_offset);
3033
3034         mapping = pci_map_single(tp->pdev, skb->data,
3035                                  skb_size - tp->rx_offset,
3036                                  PCI_DMA_FROMDEVICE);
3037
3038         map->skb = skb;
3039         pci_unmap_addr_set(map, mapping, mapping);
3040
3041         if (src_map != NULL)
3042                 src_map->skb = NULL;
3043
3044         desc->addr_hi = ((u64)mapping >> 32);
3045         desc->addr_lo = ((u64)mapping & 0xffffffff);
3046
3047         return skb_size;
3048 }
3049
3050 /* We only need to move over in the address because the other
3051  * members of the RX descriptor are invariant.  See notes above
3052  * tg3_alloc_rx_skb for full details.
3053  */
3054 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3055                            int src_idx, u32 dest_idx_unmasked)
3056 {
3057         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3058         struct ring_info *src_map, *dest_map;
3059         int dest_idx;
3060
3061         switch (opaque_key) {
3062         case RXD_OPAQUE_RING_STD:
3063                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3064                 dest_desc = &tp->rx_std[dest_idx];
3065                 dest_map = &tp->rx_std_buffers[dest_idx];
3066                 src_desc = &tp->rx_std[src_idx];
3067                 src_map = &tp->rx_std_buffers[src_idx];
3068                 break;
3069
3070         case RXD_OPAQUE_RING_JUMBO:
3071                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3072                 dest_desc = &tp->rx_jumbo[dest_idx];
3073                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3074                 src_desc = &tp->rx_jumbo[src_idx];
3075                 src_map = &tp->rx_jumbo_buffers[src_idx];
3076                 break;
3077
3078         default:
3079                 return;
3080         };
3081
3082         dest_map->skb = src_map->skb;
3083         pci_unmap_addr_set(dest_map, mapping,
3084                            pci_unmap_addr(src_map, mapping));
3085         dest_desc->addr_hi = src_desc->addr_hi;
3086         dest_desc->addr_lo = src_desc->addr_lo;
3087
3088         src_map->skb = NULL;
3089 }
3090
3091 #if TG3_VLAN_TAG_USED
3092 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3093 {
3094         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3095 }
3096 #endif
3097
3098 /* The RX ring scheme is composed of multiple rings which post fresh
3099  * buffers to the chip, and one special ring the chip uses to report
3100  * status back to the host.
3101  *
3102  * The special ring reports the status of received packets to the
3103  * host.  The chip does not write into the original descriptor the
3104  * RX buffer was obtained from.  The chip simply takes the original
3105  * descriptor as provided by the host, updates the status and length
3106  * field, then writes this into the next status ring entry.
3107  *
3108  * Each ring the host uses to post buffers to the chip is described
3109  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3110  * it is first placed into the on-chip ram.  When the packet's length
3111  * is known, it walks down the TG3_BDINFO entries to select the ring.
3112  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3113  * whose MAXLEN covers the new packet's length is chosen.
3114  *
3115  * The "separate ring for rx status" scheme may sound queer, but it makes
3116  * sense from a cache coherency perspective.  If only the host writes
3117  * to the buffer post rings, and only the chip writes to the rx status
3118  * rings, then cache lines never move beyond shared-modified state.
3119  * If both the host and chip were to write into the same ring, cache line
3120  * eviction could occur since both entities want it in an exclusive state.
3121  */
3122 static int tg3_rx(struct tg3 *tp, int budget)
3123 {
3124         u32 work_mask;
3125         u32 sw_idx = tp->rx_rcb_ptr;
3126         u16 hw_idx;
3127         int received;
3128
3129         hw_idx = tp->hw_status->idx[0].rx_producer;
3130         /*
3131          * We need to order the read of hw_idx and the read of
3132          * the opaque cookie.
3133          */
3134         rmb();
3135         work_mask = 0;
3136         received = 0;
3137         while (sw_idx != hw_idx && budget > 0) {
3138                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3139                 unsigned int len;
3140                 struct sk_buff *skb;
3141                 dma_addr_t dma_addr;
3142                 u32 opaque_key, desc_idx, *post_ptr;
3143
3144                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3145                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3146                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3147                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3148                                                   mapping);
3149                         skb = tp->rx_std_buffers[desc_idx].skb;
3150                         post_ptr = &tp->rx_std_ptr;
3151                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3152                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3153                                                   mapping);
3154                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3155                         post_ptr = &tp->rx_jumbo_ptr;
3156                 }
3157                 else {
3158                         goto next_pkt_nopost;
3159                 }
3160
3161                 work_mask |= opaque_key;
3162
3163                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3164                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3165                 drop_it:
3166                         tg3_recycle_rx(tp, opaque_key,
3167                                        desc_idx, *post_ptr);
3168                 drop_it_no_recycle:
3169                         /* Other statistics kept track of by card. */
3170                         tp->net_stats.rx_dropped++;
3171                         goto next_pkt;
3172                 }
3173
3174                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3175
3176                 if (len > RX_COPY_THRESHOLD 
3177                         && tp->rx_offset == 2
3178                         /* rx_offset != 2 iff this is a 5701 card running
3179                          * in PCI-X mode [see tg3_get_invariants()] */
3180                 ) {
3181                         int skb_size;
3182
3183                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3184                                                     desc_idx, *post_ptr);
3185                         if (skb_size < 0)
3186                                 goto drop_it;
3187
3188                         pci_unmap_single(tp->pdev, dma_addr,
3189                                          skb_size - tp->rx_offset,
3190                                          PCI_DMA_FROMDEVICE);
3191
3192                         skb_put(skb, len);
3193                 } else {
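                        /* Small packet, or the 5701 PCI-X offset workaround
                         * is in effect: copy into a freshly allocated skb
                         * and leave the original ring buffer in place for
                         * reuse.
                         */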
3194                         struct sk_buff *copy_skb;
3195
3196                         tg3_recycle_rx(tp, opaque_key,
3197                                        desc_idx, *post_ptr);
3198
3199                         copy_skb = dev_alloc_skb(len + 2);
3200                         if (copy_skb == NULL)
3201                                 goto drop_it_no_recycle;
3202
3203                         copy_skb->dev = tp->dev;
3204                         skb_reserve(copy_skb, 2);
3205                         skb_put(copy_skb, len);
3206                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3207                         memcpy(copy_skb->data, skb->data, len);
3208                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3209
3210                         /* We'll reuse the original ring buffer. */
3211                         skb = copy_skb;
3212                 }
3213
3214                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3215                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3216                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3217                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3218                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3219                 else
3220                         skb->ip_summed = CHECKSUM_NONE;
3221
3222                 skb->protocol = eth_type_trans(skb, tp->dev);
3223 #if TG3_VLAN_TAG_USED
3224                 if (tp->vlgrp != NULL &&
3225                     desc->type_flags & RXD_FLAG_VLAN) {
3226                         tg3_vlan_rx(tp, skb,
3227                                     desc->err_vlan & RXD_VLAN_MASK);
3228                 } else
3229 #endif
3230                         netif_receive_skb(skb);
3231
3232                 tp->dev->last_rx = jiffies;
3233                 received++;
3234                 budget--;
3235
3236 next_pkt:
3237                 (*post_ptr)++;
3238 next_pkt_nopost:
3239                 sw_idx++;
3240                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3241
3242                 /* Refresh hw_idx to see if there is new work */
3243                 if (sw_idx == hw_idx) {
3244                         hw_idx = tp->hw_status->idx[0].rx_producer;
3245                         rmb();
3246                 }
3247         }
3248
3249         /* ACK the status ring. */
3250         tp->rx_rcb_ptr = sw_idx;
3251         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3252
3253         /* Refill RX ring(s). */
3254         if (work_mask & RXD_OPAQUE_RING_STD) {
3255                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3256                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3257                              sw_idx);
3258         }
3259         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3260                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3261                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3262                              sw_idx);
3263         }
3264         mmiowb();
3265
3266         return received;
3267 }
3268
3269 static int tg3_poll(struct net_device *netdev, int *budget)
3270 {
3271         struct tg3 *tp = netdev_priv(netdev);
3272         struct tg3_hw_status *sblk = tp->hw_status;
3273         int done;
3274
3275         /* handle link change and other phy events */
3276         if (!(tp->tg3_flags &
3277               (TG3_FLAG_USE_LINKCHG_REG |
3278                TG3_FLAG_POLL_SERDES))) {
3279                 if (sblk->status & SD_STATUS_LINK_CHG) {
3280                         sblk->status = SD_STATUS_UPDATED |
3281                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3282                         spin_lock(&tp->lock);
3283                         tg3_setup_phy(tp, 0);
3284                         spin_unlock(&tp->lock);
3285                 }
3286         }
3287
3288         /* run TX completion thread */
3289         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3290                 tg3_tx(tp);
3291         }
3292
3293         /* run RX thread, within the bounds set by NAPI.
3294          * All RX "locking" is done by ensuring outside
3295          * code synchronizes with dev->poll()
3296          */
3297         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3298                 int orig_budget = *budget;
3299                 int work_done;
3300
3301                 if (orig_budget > netdev->quota)
3302                         orig_budget = netdev->quota;
3303
3304                 work_done = tg3_rx(tp, orig_budget);
3305
3306                 *budget -= work_done;
3307                 netdev->quota -= work_done;
3308         }
3309
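        /* With tagged status, remember the tag we have just processed so
         * tg3_restart_ints() can report it back to the chip when
         * interrupts are re-enabled.
         */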
3310         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3311                 tp->last_tag = sblk->status_tag;
3312                 rmb();
3313         } else
3314                 sblk->status &= ~SD_STATUS_UPDATED;
3315
3316         /* if no more work, tell net stack and NIC we're done */
3317         done = !tg3_has_work(tp);
3318         if (done) {
3319                 netif_rx_complete(netdev);
3320                 tg3_restart_ints(tp);
3321         }
3322
3323         return (done ? 0 : 1);
3324 }
3325
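/* Keep the IRQ handlers from scheduling NAPI: set irq_sync (checked by the
 * handlers via tg3_irq_sync()) and wait for any handler instance already
 * running on another CPU to finish.
 */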
3326 static void tg3_irq_quiesce(struct tg3 *tp)
3327 {
3328         BUG_ON(tp->irq_sync);
3329
3330         tp->irq_sync = 1;
3331         smp_mb();
3332
3333         synchronize_irq(tp->pdev->irq);
3334 }
3335
3336 static inline int tg3_irq_sync(struct tg3 *tp)
3337 {
3338         return tp->irq_sync;
3339 }
3340
3341 /* Fully shut down all tg3 driver activity elsewhere in the system.
3342  * If irq_sync is non-zero, the IRQ handler is quiesced (synchronized
3343  * with) as well.  Most of the time this is only necessary when
3344  * shutting down the device.
3345  */
3346 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3347 {
3348         if (irq_sync)
3349                 tg3_irq_quiesce(tp);
3350         spin_lock_bh(&tp->lock);
3351         spin_lock(&tp->tx_lock);
3352 }
3353
3354 static inline void tg3_full_unlock(struct tg3 *tp)
3355 {
3356         spin_unlock(&tp->tx_lock);
3357         spin_unlock_bh(&tp->lock);
3358 }
3359
3360 /* MSI ISR - No need to check for interrupt sharing and no need to
3361  * flush status block and interrupt mailbox. PCI ordering rules
3362  * guarantee that MSI will arrive after the status block.
3363  */
3364 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3365 {
3366         struct net_device *dev = dev_id;
3367         struct tg3 *tp = netdev_priv(dev);
3368
3369         prefetch(tp->hw_status);
3370         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3371         /*
3372          * Writing any value to intr-mbox-0 clears PCI INTA# and
3373          * chip-internal interrupt pending events.
3374          * Writing non-zero to intr-mbox-0 additionally tells the
3375          * NIC to stop sending us irqs, engaging "in-intr-handler"
3376          * event coalescing.
3377          */
3378         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3379         if (likely(!tg3_irq_sync(tp)))
3380                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3381
3382         return IRQ_RETVAL(1);
3383 }
3384
3385 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3386 {
3387         struct net_device *dev = dev_id;
3388         struct tg3 *tp = netdev_priv(dev);
3389         struct tg3_hw_status *sblk = tp->hw_status;
3390         unsigned int handled = 1;
3391
3392         /* In INTx mode, it is possible for the interrupt to arrive at
3393          * the CPU before the status block write posted prior to the interrupt.
3394          * Reading the PCI State register will confirm whether the
3395          * interrupt is ours and will flush the status block.
3396          */
3397         if ((sblk->status & SD_STATUS_UPDATED) ||
3398             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3399                 /*
3400                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3401                  * chip-internal interrupt pending events.
3402                  * Writing non-zero to intr-mbox-0 additionally tells the
3403                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3404                  * event coalescing.
3405                  */
3406                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3407                              0x00000001);
3408                 if (tg3_irq_sync(tp))
3409                         goto out;
3410                 sblk->status &= ~SD_STATUS_UPDATED;
3411                 if (likely(tg3_has_work(tp))) {
3412                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3413                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3414                 } else {
3415                         /* No work, shared interrupt perhaps?  re-enable
3416                          * interrupts, and flush that PCI write
3417                          */
3418                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3419                                 0x00000000);
3420                 }
3421         } else {        /* shared interrupt */
3422                 handled = 0;
3423         }
3424 out:
3425         return IRQ_RETVAL(handled);
3426 }
3427
3428 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3429 {
3430         struct net_device *dev = dev_id;
3431         struct tg3 *tp = netdev_priv(dev);
3432         struct tg3_hw_status *sblk = tp->hw_status;
3433         unsigned int handled = 1;
3434
3435         /* In INTx mode, it is possible for the interrupt to arrive at
3436          * the CPU before the status block write posted prior to the interrupt.
3437          * Reading the PCI State register will confirm whether the
3438          * interrupt is ours and will flush the status block.
3439          */
3440         if ((sblk->status_tag != tp->last_tag) ||
3441             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3442                 /*
3443                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3444                  * chip-internal interrupt pending events.
3445                  * Writing non-zero to intr-mbox-0 additionally tells the
3446                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3447                  * event coalescing.
3448                  */
3449                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3450                              0x00000001);
3451                 if (tg3_irq_sync(tp))
3452                         goto out;
3453                 if (netif_rx_schedule_prep(dev)) {
3454                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3455                         /* Update last_tag to mark that this status has been
3456                          * seen. Because interrupt may be shared, we may be
3457                          * racing with tg3_poll(), so only update last_tag
3458                          * if tg3_poll() is not scheduled.
3459                          */
3460                         tp->last_tag = sblk->status_tag;
3461                         __netif_rx_schedule(dev);
3462                 }
3463         } else {        /* shared interrupt */
3464                 handled = 0;
3465         }
3466 out:
3467         return IRQ_RETVAL(handled);
3468 }
3469
3470 /* ISR for interrupt test */
3471 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3472                 struct pt_regs *regs)
3473 {
3474         struct net_device *dev = dev_id;
3475         struct tg3 *tp = netdev_priv(dev);
3476         struct tg3_hw_status *sblk = tp->hw_status;
3477
3478         if ((sblk->status & SD_STATUS_UPDATED) ||
3479             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3480                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3481                              0x00000001);
3482                 return IRQ_RETVAL(1);
3483         }
3484         return IRQ_RETVAL(0);
3485 }
3486
3487 static int tg3_init_hw(struct tg3 *);
3488 static int tg3_halt(struct tg3 *, int, int);
3489
3490 #ifdef CONFIG_NET_POLL_CONTROLLER
3491 static void tg3_poll_controller(struct net_device *dev)
3492 {
3493         struct tg3 *tp = netdev_priv(dev);
3494
3495         tg3_interrupt(tp->pdev->irq, dev, NULL);
3496 }
3497 #endif
3498
3499 static void tg3_reset_task(void *_data)
3500 {
3501         struct tg3 *tp = _data;
3502         unsigned int restart_timer;
3503
3504         tg3_full_lock(tp, 0);
3505         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3506
3507         if (!netif_running(tp->dev)) {
3508                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3509                 tg3_full_unlock(tp);
3510                 return;
3511         }
3512
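        /* tg3_netif_stop() may sleep, so drop the locks around it and
         * retake them with the IRQ handler quiesced before resetting.
         */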
3513         tg3_full_unlock(tp);
3514
3515         tg3_netif_stop(tp);
3516
3517         tg3_full_lock(tp, 1);
3518
3519         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3520         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3521
3522         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3523         tg3_init_hw(tp);
3524
3525         tg3_netif_start(tp);
3526
3527         if (restart_timer)
3528                 mod_timer(&tp->timer, jiffies + 1);
3529
3530         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3531
3532         tg3_full_unlock(tp);
3533 }
3534
3535 static void tg3_tx_timeout(struct net_device *dev)
3536 {
3537         struct tg3 *tp = netdev_priv(dev);
3538
3539         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3540                dev->name);
3541
3542         schedule_work(&tp->reset_task);
3543 }
3544
3545 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3546 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3547 {
3548         u32 base = (u32) mapping & 0xffffffff;
3549
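        /* 0xffffdcc0 leaves roughly 9KB of headroom below the 4GB mark,
         * about the largest buffer this driver ever maps, so the cheap
         * range check filters out mappings that cannot possibly wrap.
         */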
3550         return ((base > 0xffffdcc0) &&
3551                 (base + len + 8 < base));
3552 }
3553
3554 /* Test for DMA addresses > 40-bit */
3555 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3556                                           int len)
3557 {
3558 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3559         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3560                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3561         return 0;
3562 #else
3563         return 0;
3564 #endif
3565 }
3566
3567 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3568
3569 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3570 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3571                                        u32 last_plus_one, u32 *start,
3572                                        u32 base_flags, u32 mss)
3573 {
3574         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3575         dma_addr_t new_addr = 0;
3576         u32 entry = *start;
3577         int i, ret = 0;
3578
3579         if (!new_skb) {
3580                 ret = -1;
3581         } else {
3582                 /* New SKB is guaranteed to be linear. */
3583                 entry = *start;
3584                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3585                                           PCI_DMA_TODEVICE);
3586                 /* Make sure new skb does not cross any 4G boundaries.
3587                  * Drop the packet if it does.
3588                  */
3589                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3590                         ret = -1;
3591                         dev_kfree_skb(new_skb);
3592                         new_skb = NULL;
3593                 } else {
3594                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3595                                     base_flags, 1 | (mss << 1));
3596                         *start = NEXT_TX(entry);
3597                 }
3598         }
3599
3600         /* Now clean up the sw ring entries. */
3601         i = 0;
3602         while (entry != last_plus_one) {
3603                 int len;
3604
3605                 if (i == 0)
3606                         len = skb_headlen(skb);
3607                 else
3608                         len = skb_shinfo(skb)->frags[i-1].size;
3609                 pci_unmap_single(tp->pdev,
3610                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3611                                  len, PCI_DMA_TODEVICE);
3612                 if (i == 0) {
3613                         tp->tx_buffers[entry].skb = new_skb;
3614                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3615                 } else {
3616                         tp->tx_buffers[entry].skb = NULL;
3617                 }
3618                 entry = NEXT_TX(entry);
3619                 i++;
3620         }
3621
3622         dev_kfree_skb(skb);
3623
3624         return ret;
3625 }
3626
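/* Fill in one hardware TX descriptor.  The low bit of mss_and_is_end marks
 * the final descriptor of a frame (TXD_FLAG_END); the remaining bits carry
 * the TSO MSS.  A VLAN tag, if present, rides in the upper 16 bits of flags.
 */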
3627 static void tg3_set_txd(struct tg3 *tp, int entry,
3628                         dma_addr_t mapping, int len, u32 flags,
3629                         u32 mss_and_is_end)
3630 {
3631         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3632         int is_end = (mss_and_is_end & 0x1);
3633         u32 mss = (mss_and_is_end >> 1);
3634         u32 vlan_tag = 0;
3635
3636         if (is_end)
3637                 flags |= TXD_FLAG_END;
3638         if (flags & TXD_FLAG_VLAN) {
3639                 vlan_tag = flags >> 16;
3640                 flags &= 0xffff;
3641         }
3642         vlan_tag |= (mss << TXD_MSS_SHIFT);
3643
3644         txd->addr_hi = ((u64) mapping >> 32);
3645         txd->addr_lo = ((u64) mapping & 0xffffffff);
3646         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3647         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3648 }
3649
3650 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3651 {
3652         struct tg3 *tp = netdev_priv(dev);
3653         dma_addr_t mapping;
3654         u32 len, entry, base_flags, mss;
3655         int would_hit_hwbug;
3656
3657         len = skb_headlen(skb);
3658
3659         /* No BH disabling for tx_lock here.  We are running in BH disabled
3660          * context and TX reclaim runs via tp->poll inside of a software
3661          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3662          * no IRQ context deadlocks to worry about either.  Rejoice!
3663          */
3664         if (!spin_trylock(&tp->tx_lock))
3665                 return NETDEV_TX_LOCKED; 
3666
3667         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3668                 if (!netif_queue_stopped(dev)) {
3669                         netif_stop_queue(dev);
3670
3671                         /* This is a hard error, log it. */
3672                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3673                                "queue awake!\n", dev->name);
3674                 }
3675                 spin_unlock(&tp->tx_lock);
3676                 return NETDEV_TX_BUSY;
3677         }
3678
3679         entry = tp->tx_prod;
3680         base_flags = 0;
3681         if (skb->ip_summed == CHECKSUM_HW)
3682                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3683 #if TG3_TSO_SUPPORT != 0
3684         mss = 0;
3685         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3686             (mss = skb_shinfo(skb)->tso_size) != 0) {
3687                 int tcp_opt_len, ip_tcp_len;
3688
3689                 if (skb_header_cloned(skb) &&
3690                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3691                         dev_kfree_skb(skb);
3692                         goto out_unlock;
3693                 }
3694
3695                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3696                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3697
3698                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3699                                TXD_FLAG_CPU_POST_DMA);
3700
3701                 skb->nh.iph->check = 0;
3702                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3703                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3704                         skb->h.th->check = 0;
3705                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3706                 }
3707                 else {
3708                         skb->h.th->check =
3709                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3710                                                    skb->nh.iph->daddr,
3711                                                    0, IPPROTO_TCP, 0);
3712                 }
3713
3714                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3715                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3716                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3717                                 int tsflags;
3718
3719                                 tsflags = ((skb->nh.iph->ihl - 5) +
3720                                            (tcp_opt_len >> 2));
3721                                 mss |= (tsflags << 11);
3722                         }
3723                 } else {
3724                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3725                                 int tsflags;
3726
3727                                 tsflags = ((skb->nh.iph->ihl - 5) +
3728                                            (tcp_opt_len >> 2));
3729                                 base_flags |= tsflags << 12;
3730                         }
3731                 }
3732         }
3733 #else
3734         mss = 0;
3735 #endif
3736 #if TG3_VLAN_TAG_USED
3737         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3738                 base_flags |= (TXD_FLAG_VLAN |
3739                                (vlan_tx_tag_get(skb) << 16));
3740 #endif
3741
3742         /* Queue skb data, a.k.a. the main skb fragment. */
3743         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3744
3745         tp->tx_buffers[entry].skb = skb;
3746         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3747
3748         would_hit_hwbug = 0;
3749
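        /* Mappings that cross a 4GB boundary (or exceed 40 bits on
         * 5780-class parts) trip hardware DMA bugs; note it so the
         * workaround path below can relinearize the skb.
         */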
3750         if (tg3_4g_overflow_test(mapping, len))
3751                 would_hit_hwbug = 1;
3752
3753         tg3_set_txd(tp, entry, mapping, len, base_flags,
3754                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3755
3756         entry = NEXT_TX(entry);
3757
3758         /* Now loop through additional data fragments, and queue them. */
3759         if (skb_shinfo(skb)->nr_frags > 0) {
3760                 unsigned int i, last;
3761
3762                 last = skb_shinfo(skb)->nr_frags - 1;
3763                 for (i = 0; i <= last; i++) {
3764                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3765
3766                         len = frag->size;
3767                         mapping = pci_map_page(tp->pdev,
3768                                                frag->page,
3769                                                frag->page_offset,
3770                                                len, PCI_DMA_TODEVICE);
3771
3772                         tp->tx_buffers[entry].skb = NULL;
3773                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3774
3775                         if (tg3_4g_overflow_test(mapping, len))
3776                                 would_hit_hwbug = 1;
3777
3778                         if (tg3_40bit_overflow_test(tp, mapping, len))
3779                                 would_hit_hwbug = 1;
3780
3781                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3782                                 tg3_set_txd(tp, entry, mapping, len,
3783                                             base_flags, (i == last)|(mss << 1));
3784                         else
3785                                 tg3_set_txd(tp, entry, mapping, len,
3786                                             base_flags, (i == last));
3787
3788                         entry = NEXT_TX(entry);
3789                 }
3790         }
3791
3792         if (would_hit_hwbug) {
3793                 u32 last_plus_one = entry;
3794                 u32 start;
3795
3796                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3797                 start &= (TG3_TX_RING_SIZE - 1);
3798
3799                 /* If the workaround fails due to memory/mapping
3800                  * failure, silently drop this packet.
3801                  */
3802                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3803                                                 &start, base_flags, mss))
3804                         goto out_unlock;
3805
3806                 entry = start;
3807         }
3808
3809         /* Packets are ready, update Tx producer idx local and on card. */
3810         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3811
3812         tp->tx_prod = entry;
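        /* Stop the queue when descriptors run low; wake it right back up
         * if the completion path has already freed enough in the meantime.
         */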
3813         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3814                 netif_stop_queue(dev);
3815                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3816                         netif_wake_queue(tp->dev);
3817         }
3818
3819 out_unlock:
3820         mmiowb();
3821         spin_unlock(&tp->tx_lock);
3822
3823         dev->trans_start = jiffies;
3824
3825         return NETDEV_TX_OK;
3826 }
3827
3828 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3829                                int new_mtu)
3830 {
3831         dev->mtu = new_mtu;
3832
3833         if (new_mtu > ETH_DATA_LEN) {
3834                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3835                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3836                         ethtool_op_set_tso(dev, 0);
3837                 }
3838                 else
3839                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3840         } else {
3841                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3842                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3843                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3844         }
3845 }
3846
3847 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3848 {
3849         struct tg3 *tp = netdev_priv(dev);
3850
3851         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3852                 return -EINVAL;
3853
3854         if (!netif_running(dev)) {
3855                 /* We'll just catch it later when the
3856                  * device is up'd.
3857                  */
3858                 tg3_set_mtu(dev, tp, new_mtu);
3859                 return 0;
3860         }
3861
3862         tg3_netif_stop(tp);
3863
3864         tg3_full_lock(tp, 1);
3865
3866         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3867
3868         tg3_set_mtu(dev, tp, new_mtu);
3869
3870         tg3_init_hw(tp);
3871
3872         tg3_netif_start(tp);
3873
3874         tg3_full_unlock(tp);
3875
3876         return 0;
3877 }
3878
3879 /* Free up pending packets in all rx/tx rings.
3880  *
3881  * The chip has been shut down and the driver detached from
3882  * the networking, so no interrupts or new tx packets will
3883  * the networking stack, so no interrupts or new tx packets will
3884  * in an interrupt context and thus may sleep.
3885  */
3886 static void tg3_free_rings(struct tg3 *tp)
3887 {
3888         struct ring_info *rxp;
3889         int i;
3890
3891         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3892                 rxp = &tp->rx_std_buffers[i];
3893
3894                 if (rxp->skb == NULL)
3895                         continue;
3896                 pci_unmap_single(tp->pdev,
3897                                  pci_unmap_addr(rxp, mapping),
3898                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3899                                  PCI_DMA_FROMDEVICE);
3900                 dev_kfree_skb_any(rxp->skb);
3901                 rxp->skb = NULL;
3902         }
3903
3904         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3905                 rxp = &tp->rx_jumbo_buffers[i];
3906
3907                 if (rxp->skb == NULL)
3908                         continue;
3909                 pci_unmap_single(tp->pdev,
3910                                  pci_unmap_addr(rxp, mapping),
3911                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3912                                  PCI_DMA_FROMDEVICE);
3913                 dev_kfree_skb_any(rxp->skb);
3914                 rxp->skb = NULL;
3915         }
3916
3917         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3918                 struct tx_ring_info *txp;
3919                 struct sk_buff *skb;
3920                 int j;
3921
3922                 txp = &tp->tx_buffers[i];
3923                 skb = txp->skb;
3924
3925                 if (skb == NULL) {
3926                         i++;
3927                         continue;
3928                 }
3929
3930                 pci_unmap_single(tp->pdev,
3931                                  pci_unmap_addr(txp, mapping),
3932                                  skb_headlen(skb),
3933                                  PCI_DMA_TODEVICE);
3934                 txp->skb = NULL;
3935
3936                 i++;
3937
3938                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3939                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3940                         pci_unmap_page(tp->pdev,
3941                                        pci_unmap_addr(txp, mapping),
3942                                        skb_shinfo(skb)->frags[j].size,
3943                                        PCI_DMA_TODEVICE);
3944                         i++;
3945                 }
3946
3947                 dev_kfree_skb_any(skb);
3948         }
3949 }
3950
3951 /* Initialize tx/rx rings for packet processing.
3952  *
3953  * The chip has been shut down and the driver detached from
3954  * the networking stack, so no interrupts or new tx packets will
3955  * end up in the driver.  tp->{tx,}lock are held and thus
3956  * we may not sleep.
3957  */
3958 static void tg3_init_rings(struct tg3 *tp)
3959 {
3960         u32 i;
3961
3962         /* Free up all the SKBs. */
3963         tg3_free_rings(tp);
3964
3965         /* Zero out all descriptors. */
3966         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3967         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3968         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3969         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3970
3971         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3972         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3973             (tp->dev->mtu > ETH_DATA_LEN))
3974                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3975
3976         /* Initialize invariants of the rings, we only set this
3977          * stuff once.  This works because the card does not
3978          * write into the rx buffer posting rings.
3979          */
3980         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3981                 struct tg3_rx_buffer_desc *rxd;
3982
3983                 rxd = &tp->rx_std[i];
3984                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3985                         << RXD_LEN_SHIFT;
3986                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3987                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3988                                (i << RXD_OPAQUE_INDEX_SHIFT));
3989         }
3990
3991         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3992                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3993                         struct tg3_rx_buffer_desc *rxd;
3994
3995                         rxd = &tp->rx_jumbo[i];
3996                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3997                                 << RXD_LEN_SHIFT;
3998                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3999                                 RXD_FLAG_JUMBO;
4000                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4001                                (i << RXD_OPAQUE_INDEX_SHIFT));
4002                 }
4003         }
4004
4005         /* Now allocate fresh SKBs for each rx ring. */
4006         for (i = 0; i < tp->rx_pending; i++) {
4007                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4008                                      -1, i) < 0)
4009                         break;
4010         }
4011
4012         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4013                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4014                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4015                                              -1, i) < 0)
4016                                 break;
4017                 }
4018         }
4019 }
4020
4021 /*
4022  * Must not be invoked with interrupt sources disabled and
4023  * the hardware shut down.
4024  */
4025 static void tg3_free_consistent(struct tg3 *tp)
4026 {
4027         kfree(tp->rx_std_buffers);
4028         tp->rx_std_buffers = NULL;
4029         if (tp->rx_std) {
4030                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4031                                     tp->rx_std, tp->rx_std_mapping);
4032                 tp->rx_std = NULL;
4033         }
4034         if (tp->rx_jumbo) {
4035                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4036                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4037                 tp->rx_jumbo = NULL;
4038         }
4039         if (tp->rx_rcb) {
4040                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4041                                     tp->rx_rcb, tp->rx_rcb_mapping);
4042                 tp->rx_rcb = NULL;
4043         }
4044         if (tp->tx_ring) {
4045                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4046                         tp->tx_ring, tp->tx_desc_mapping);
4047                 tp->tx_ring = NULL;
4048         }
4049         if (tp->hw_status) {
4050                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4051                                     tp->hw_status, tp->status_mapping);
4052                 tp->hw_status = NULL;
4053         }
4054         if (tp->hw_stats) {
4055                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4056                                     tp->hw_stats, tp->stats_mapping);
4057                 tp->hw_stats = NULL;
4058         }
4059 }
4060
4061 /*
4062  * Must not be invoked with interrupt sources disabled and
4063  * the hardware shut down.  Can sleep.
4064  */
4065 static int tg3_alloc_consistent(struct tg3 *tp)
4066 {
4067         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4068                                       (TG3_RX_RING_SIZE +
4069                                        TG3_RX_JUMBO_RING_SIZE)) +
4070                                      (sizeof(struct tx_ring_info) *
4071                                       TG3_TX_RING_SIZE),
4072                                      GFP_KERNEL);
4073         if (!tp->rx_std_buffers)
4074                 return -ENOMEM;
4075
4076         memset(tp->rx_std_buffers, 0,
4077                (sizeof(struct ring_info) *
4078                 (TG3_RX_RING_SIZE +
4079                  TG3_RX_JUMBO_RING_SIZE)) +
4080                (sizeof(struct tx_ring_info) *
4081                 TG3_TX_RING_SIZE));
4082
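        /* The std, jumbo and tx bookkeeping arrays share the single
         * allocation above; carve it into its three pieces.
         */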
4083         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4084         tp->tx_buffers = (struct tx_ring_info *)
4085                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4086
4087         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4088                                           &tp->rx_std_mapping);
4089         if (!tp->rx_std)
4090                 goto err_out;
4091
4092         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4093                                             &tp->rx_jumbo_mapping);
4094
4095         if (!tp->rx_jumbo)
4096                 goto err_out;
4097
4098         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4099                                           &tp->rx_rcb_mapping);
4100         if (!tp->rx_rcb)
4101                 goto err_out;
4102
4103         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4104                                            &tp->tx_desc_mapping);
4105         if (!tp->tx_ring)
4106                 goto err_out;
4107
4108         tp->hw_status = pci_alloc_consistent(tp->pdev,
4109                                              TG3_HW_STATUS_SIZE,
4110                                              &tp->status_mapping);
4111         if (!tp->hw_status)
4112                 goto err_out;
4113
4114         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4115                                             sizeof(struct tg3_hw_stats),
4116                                             &tp->stats_mapping);
4117         if (!tp->hw_stats)
4118                 goto err_out;
4119
4120         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4121         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4122
4123         return 0;
4124
4125 err_out:
4126         tg3_free_consistent(tp);
4127         return -ENOMEM;
4128 }
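
/* Illustrative sketch only, not part of the driver: a hypothetical caller
 * pairs tg3_alloc_consistent() with tg3_free_consistent(), much as the
 * open/close paths elsewhere in this file presumably do.  On failure the
 * allocator has already cleaned up after itself via tg3_free_consistent().
 */
#if 0
static int tg3_rings_example(struct tg3 *tp)
{
        int err;

        err = tg3_alloc_consistent(tp);
        if (err)
                return err;     /* partial allocations already freed */

        /* ... program the rings and run traffic ... */

        tg3_free_consistent(tp);        /* frees only the non-NULL pieces */
        return 0;
}
#endif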
4129
4130 #define MAX_WAIT_CNT 1000
4131
4132 /* To stop a block, clear the enable bit and poll till it
4133  * clears.  tp->lock is held.
4134  */
4135 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4136 {
4137         unsigned int i;
4138         u32 val;
4139
4140         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4141                 switch (ofs) {
4142                 case RCVLSC_MODE:
4143                 case DMAC_MODE:
4144                 case MBFREE_MODE:
4145                 case BUFMGR_MODE:
4146                 case MEMARB_MODE:
4147                         /* We can't enable/disable these bits on the
4148                          * 5705/5750, so just report success.
4149                          */
4150                         return 0;
4151
4152                 default:
4153                         break;
4154                 }
4155         }
4156
4157         val = tr32(ofs);
4158         val &= ~enable_bit;
4159         tw32_f(ofs, val);
4160
4161         for (i = 0; i < MAX_WAIT_CNT; i++) {
4162                 udelay(100);
4163                 val = tr32(ofs);
4164                 if ((val & enable_bit) == 0)
4165                         break;
4166         }
4167
4168         if (i == MAX_WAIT_CNT && !silent) {
4169                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4170                        "ofs=%lx enable_bit=%x\n",
4171                        ofs, enable_bit);
4172                 return -ENODEV;
4173         }
4174
4175         return 0;
4176 }
4177
4178 /* tp->lock is held. */
4179 static int tg3_abort_hw(struct tg3 *tp, int silent)
4180 {
4181         int i, err;
4182
4183         tg3_disable_ints(tp);
4184
4185         tp->rx_mode &= ~RX_MODE_ENABLE;
4186         tw32_f(MAC_RX_MODE, tp->rx_mode);
4187         udelay(10);
4188
4189         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4190         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4191         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4192         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4193         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4194         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4195
4196         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4197         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4198         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4199         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4200         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4201         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4202         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4203
4204         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4205         tw32_f(MAC_MODE, tp->mac_mode);
4206         udelay(40);
4207
4208         tp->tx_mode &= ~TX_MODE_ENABLE;
4209         tw32_f(MAC_TX_MODE, tp->tx_mode);
4210
4211         for (i = 0; i < MAX_WAIT_CNT; i++) {
4212                 udelay(100);
4213                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4214                         break;
4215         }
4216         if (i >= MAX_WAIT_CNT) {
4217                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4218                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4219                        tp->dev->name, tr32(MAC_TX_MODE));
4220                 err |= -ENODEV;
4221         }
4222
4223         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4224         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4225         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4226
4227         tw32(FTQ_RESET, 0xffffffff);
4228         tw32(FTQ_RESET, 0x00000000);
4229
4230         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4231         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4232
4233         if (tp->hw_status)
4234                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4235         if (tp->hw_stats)
4236                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4237
4238         return err;
4239 }
4240
4241 /* tp->lock is held. */
4242 static int tg3_nvram_lock(struct tg3 *tp)
4243 {
4244         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4245                 int i;
4246
4247                 if (tp->nvram_lock_cnt == 0) {
4248                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4249                         for (i = 0; i < 8000; i++) {
4250                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4251                                         break;
4252                                 udelay(20);
4253                         }
4254                         if (i == 8000) {
4255                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4256                                 return -ENODEV;
4257                         }
4258                 }
4259                 tp->nvram_lock_cnt++;
4260         }
4261         return 0;
4262 }
4263
4264 /* tp->lock is held. */
4265 static void tg3_nvram_unlock(struct tg3 *tp)
4266 {
4267         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4268                 if (tp->nvram_lock_cnt > 0)
4269                         tp->nvram_lock_cnt--;
4270                 if (tp->nvram_lock_cnt == 0)
4271                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4272         }
4273 }
4274
4275 /* tp->lock is held. */
4276 static void tg3_enable_nvram_access(struct tg3 *tp)
4277 {
4278         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4279             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4280                 u32 nvaccess = tr32(NVRAM_ACCESS);
4281
4282                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4283         }
4284 }
4285
4286 /* tp->lock is held. */
4287 static void tg3_disable_nvram_access(struct tg3 *tp)
4288 {
4289         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4290             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4291                 u32 nvaccess = tr32(NVRAM_ACCESS);
4292
4293                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4294         }
4295 }
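
/* Illustrative sketch only (hypothetical helper): NVRAM accesses are
 * typically bracketed by the software arbitration lock and the
 * access-enable bit, with tp->lock held, roughly as below.
 */
#if 0
static int tg3_nvram_bracketed_op_example(struct tg3 *tp)
{
        int err;

        err = tg3_nvram_lock(tp);       /* requests SWARB_REQ_SET1; counted */
        if (err)
                return err;
        tg3_enable_nvram_access(tp);    /* no-op except on unprotected 5750+ */

        /* ... issue NVRAM commands via tr32()/tw32() here ... */

        tg3_disable_nvram_access(tp);
        tg3_nvram_unlock(tp);           /* drops SWARB only when count hits 0 */
        return 0;
}
#endif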
4296
4297 /* tp->lock is held. */
4298 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4299 {
4300         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4301                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4302                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4303
4304         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4305                 switch (kind) {
4306                 case RESET_KIND_INIT:
4307                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4308                                       DRV_STATE_START);
4309                         break;
4310
4311                 case RESET_KIND_SHUTDOWN:
4312                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4313                                       DRV_STATE_UNLOAD);
4314                         break;
4315
4316                 case RESET_KIND_SUSPEND:
4317                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4318                                       DRV_STATE_SUSPEND);
4319                         break;
4320
4321                 default:
4322                         break;
4323                 }
4324         }
4325 }
4326
4327 /* tp->lock is held. */
4328 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4329 {
4330         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4331                 switch (kind) {
4332                 case RESET_KIND_INIT:
4333                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4334                                       DRV_STATE_START_DONE);
4335                         break;
4336
4337                 case RESET_KIND_SHUTDOWN:
4338                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4339                                       DRV_STATE_UNLOAD_DONE);
4340                         break;
4341
4342                 default:
4343                         break;
4344                 }
4345         }
4346 }
4347
4348 /* tp->lock is held. */
4349 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4350 {
4351         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4352                 switch (kind) {
4353                 case RESET_KIND_INIT:
4354                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4355                                       DRV_STATE_START);
4356                         break;
4357
4358                 case RESET_KIND_SHUTDOWN:
4359                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4360                                       DRV_STATE_UNLOAD);
4361                         break;
4362
4363                 case RESET_KIND_SUSPEND:
4364                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4365                                       DRV_STATE_SUSPEND);
4366                         break;
4367
4368                 default:
4369                         break;
4370                 }
4371         }
4372 }
4373
4374 static void tg3_stop_fw(struct tg3 *);
4375
4376 /* tp->lock is held. */
4377 static int tg3_chip_reset(struct tg3 *tp)
4378 {
4379         u32 val;
4380         void (*write_op)(struct tg3 *, u32, u32);
4381         int i;
4382
4383         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4384                 tg3_nvram_lock(tp);
4385                 /* No matching tg3_nvram_unlock() after this because
4386                  * chip reset below will undo the nvram lock.
4387                  */
4388                 tp->nvram_lock_cnt = 0;
4389         }
4390
4391         /*
4392          * We must avoid the readl() that normally takes place.
4393          * It locks up machines, causes machine checks, and does
4394          * other fun things.  So, temporarily disable the 5701
4395          * hardware workaround while we do the reset.
4396          */
4397         write_op = tp->write32;
4398         if (write_op == tg3_write_flush_reg32)
4399                 tp->write32 = tg3_write32;
4400
4401         /* do the reset */
4402         val = GRC_MISC_CFG_CORECLK_RESET;
4403
4404         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4405                 if (tr32(0x7e2c) == 0x60) {
4406                         tw32(0x7e2c, 0x20);
4407                 }
4408                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4409                         tw32(GRC_MISC_CFG, (1 << 29));
4410                         val |= (1 << 29);
4411                 }
4412         }
4413
4414         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4415                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4416         tw32(GRC_MISC_CFG, val);
4417
4418         /* restore 5701 hardware bug workaround write method */
4419         tp->write32 = write_op;
4420
4421         /* Unfortunately, we have to delay before the PCI read back.
4422          * Some 575X chips will not even respond to a PCI cfg access
4423          * when the reset command is given to the chip.
4424          *
4425          * How do these hardware designers expect things to work
4426          * properly if the PCI write is posted for a long period
4427          * of time?  It is always necessary to have some method by
4428          * which a register read-back can occur to push out the
4429          * write that does the reset.
4430          *
4431          * For most tg3 variants the trick below has worked.
4432          * Ho hum...
4433          */
4434         udelay(120);
4435
4436         /* Flush PCI posted writes.  The normal MMIO registers
4437          * are inaccessible at this time so this is the only
4438          * way to do this reliably (actually, this is no longer
4439          * the case, see above).  I tried to use indirect
4440          * register read/write but this upset some 5701 variants.
4441          */
4442         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4443
4444         udelay(120);
4445
4446         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4447                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4448                         int i;
4449                         u32 cfg_val;
4450
4451                         /* Wait for link training to complete.  */
4452                         for (i = 0; i < 5000; i++)
4453                                 udelay(100);
4454
4455                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4456                         pci_write_config_dword(tp->pdev, 0xc4,
4457                                                cfg_val | (1 << 15));
4458                 }
4459                 /* Set PCIE max payload size and clear error status.  */
4460                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4461         }
4462
4463         /* Re-enable indirect register accesses. */
4464         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4465                                tp->misc_host_ctrl);
4466
4467         /* Set MAX PCI retry to zero. */
4468         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4469         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4470             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4471                 val |= PCISTATE_RETRY_SAME_DMA;
4472         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4473
4474         pci_restore_state(tp->pdev);
4475
4476         /* Make sure PCI-X relaxed ordering bit is clear. */
4477         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4478         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4479         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4480
4481         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4482                 u32 val;
4483
4484                 /* Chip reset on 5780 will reset the MSI enable bit,
4485                  * so we need to restore it.
4486                  */
4487                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4488                         u16 ctrl;
4489
4490                         pci_read_config_word(tp->pdev,
4491                                              tp->msi_cap + PCI_MSI_FLAGS,
4492                                              &ctrl);
4493                         pci_write_config_word(tp->pdev,
4494                                               tp->msi_cap + PCI_MSI_FLAGS,
4495                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4496                         val = tr32(MSGINT_MODE);
4497                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4498                 }
4499
4500                 val = tr32(MEMARB_MODE);
4501                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4502
4503         } else
4504                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4505
4506         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4507                 tg3_stop_fw(tp);
4508                 tw32(0x5000, 0x400);
4509         }
4510
4511         tw32(GRC_MODE, tp->grc_mode);
4512
4513         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4514                 u32 val = tr32(0xc4);
4515
4516                 tw32(0xc4, val | (1 << 15));
4517         }
4518
4519         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4521                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4522                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4523                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4524                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4525         }
4526
4527         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4528                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4529                 tw32_f(MAC_MODE, tp->mac_mode);
4530         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4531                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4532                 tw32_f(MAC_MODE, tp->mac_mode);
4533         } else
4534                 tw32_f(MAC_MODE, 0);
4535         udelay(40);
4536
4537         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4538                 /* Wait for firmware initialization to complete. */
4539                 for (i = 0; i < 100000; i++) {
4540                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4541                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4542                                 break;
4543                         udelay(10);
4544                 }
4545                 if (i >= 100000) {
4546                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4547                                "firmware will not restart magic=%08x\n",
4548                                tp->dev->name, val);
4549                         return -ENODEV;
4550                 }
4551         }
4552
4553         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4554             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4555                 u32 val = tr32(0x7c00);
4556
4557                 tw32(0x7c00, val | (1 << 25));
4558         }
4559
4560         /* Reprobe ASF enable state.  */
4561         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4562         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4563         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4564         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4565                 u32 nic_cfg;
4566
4567                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4568                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4569                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4570                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4571                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4572                 }
4573         }
4574
4575         return 0;
4576 }
4577
4578 /* tp->lock is held. */
4579 static void tg3_stop_fw(struct tg3 *tp)
4580 {
4581         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4582                 u32 val;
4583                 int i;
4584
4585                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4586                 val = tr32(GRC_RX_CPU_EVENT);
4587                 val |= (1 << 14);
4588                 tw32(GRC_RX_CPU_EVENT, val);
4589
4590                 /* Wait for RX cpu to ACK the event.  */
4591                 for (i = 0; i < 100; i++) {
4592                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4593                                 break;
4594                         udelay(1);
4595                 }
4596         }
4597 }
4598
4599 /* tp->lock is held. */
4600 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4601 {
4602         int err;
4603
4604         tg3_stop_fw(tp);
4605
4606         tg3_write_sig_pre_reset(tp, kind);
4607
4608         tg3_abort_hw(tp, silent);
4609         err = tg3_chip_reset(tp);
4610
4611         tg3_write_sig_legacy(tp, kind);
4612         tg3_write_sig_post_reset(tp, kind);
4613
4614         if (err)
4615                 return err;
4616
4617         return 0;
4618 }
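
/* Illustrative sketch only (hypothetical caller): a shutdown or suspend
 * path would quiesce the chip under tp->lock roughly like this, letting
 * tg3_halt() drive the firmware signature handshake around the reset.
 */
#if 0
static int tg3_quiesce_example(struct tg3 *tp)
{
        /* caller is assumed to hold tp->lock with the queues stopped */
        return tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);    /* 1 == silent */
}
#endif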
4619
4620 #define TG3_FW_RELEASE_MAJOR    0x0
4621 #define TG3_FW_RELASE_MINOR     0x0
4622 #define TG3_FW_RELEASE_FIX      0x0
4623 #define TG3_FW_START_ADDR       0x08000000
4624 #define TG3_FW_TEXT_ADDR        0x08000000
4625 #define TG3_FW_TEXT_LEN         0x9c0
4626 #define TG3_FW_RODATA_ADDR      0x080009c0
4627 #define TG3_FW_RODATA_LEN       0x60
4628 #define TG3_FW_DATA_ADDR        0x08000a40
4629 #define TG3_FW_DATA_LEN         0x20
4630 #define TG3_FW_SBSS_ADDR        0x08000a60
4631 #define TG3_FW_SBSS_LEN         0xc
4632 #define TG3_FW_BSS_ADDR         0x08000a70
4633 #define TG3_FW_BSS_LEN          0x10
4634
4635 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4636         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4637         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4638         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4639         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4640         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4641         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4642         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4643         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4644         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4645         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4646         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4647         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4648         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4649         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4650         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4651         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4652         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4653         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4654         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4655         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4656         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4657         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4658         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4659         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4660         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4661         0, 0, 0, 0, 0, 0,
4662         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4663         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4664         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4665         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4666         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4667         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4668         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4669         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4670         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4671         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4672         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4673         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4674         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4675         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4676         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4677         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4678         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4679         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4680         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4681         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4682         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4683         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4684         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4685         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4686         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4687         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4688         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4689         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4690         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4691         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4692         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4693         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4694         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4695         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4696         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4697         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4698         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4699         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4700         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4701         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4702         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4703         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4704         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4705         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4706         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4707         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4708         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4709         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4710         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4711         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4712         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4713         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4714         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4715         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4716         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4717         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4718         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4719         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4720         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4721         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4722         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4723         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4724         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4725         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4726         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4727 };
4728
4729 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4730         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4731         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4732         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4733         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4734         0x00000000
4735 };
4736
4737 #if 0 /* All zeros, don't eat up space with it. */
4738 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4739         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4740         0x00000000, 0x00000000, 0x00000000, 0x00000000
4741 };
4742 #endif
4743
4744 #define RX_CPU_SCRATCH_BASE     0x30000
4745 #define RX_CPU_SCRATCH_SIZE     0x04000
4746 #define TX_CPU_SCRATCH_BASE     0x34000
4747 #define TX_CPU_SCRATCH_SIZE     0x04000
4748
4749 /* tp->lock is held. */
4750 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4751 {
4752         int i;
4753
4754         if (offset == TX_CPU_BASE &&
4755             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4756                 BUG();
4757
4758         if (offset == RX_CPU_BASE) {
4759                 for (i = 0; i < 10000; i++) {
4760                         tw32(offset + CPU_STATE, 0xffffffff);
4761                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4762                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4763                                 break;
4764                 }
4765
4766                 tw32(offset + CPU_STATE, 0xffffffff);
4767                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4768                 udelay(10);
4769         } else {
4770                 for (i = 0; i < 10000; i++) {
4771                         tw32(offset + CPU_STATE, 0xffffffff);
4772                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4773                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4774                                 break;
4775                 }
4776         }
4777
4778         if (i >= 10000) {
4779                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4780                        "and %s CPU\n",
4781                        tp->dev->name,
4782                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4783                 return -ENODEV;
4784         }
4785
4786         /* Clear firmware's nvram arbitration. */
4787         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4788                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4789         return 0;
4790 }
4791
4792 struct fw_info {
4793         unsigned int text_base;
4794         unsigned int text_len;
4795         u32 *text_data;
4796         unsigned int rodata_base;
4797         unsigned int rodata_len;
4798         u32 *rodata_data;
4799         unsigned int data_base;
4800         unsigned int data_len;
4801         u32 *data_data;
4802 };
4803
4804 /* tp->lock is held. */
4805 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4806                                  int cpu_scratch_size, struct fw_info *info)
4807 {
4808         int err, lock_err, i;
4809         void (*write_op)(struct tg3 *, u32, u32);
4810
4811         if (cpu_base == TX_CPU_BASE &&
4812             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4813                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4814                        "TX cpu firmware on %s which is 5705.\n",
4815                        tp->dev->name);
4816                 return -EINVAL;
4817         }
4818
4819         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4820                 write_op = tg3_write_mem;
4821         else
4822                 write_op = tg3_write_indirect_reg32;
4823
4824         /* It is possible that bootcode is still loading at this point.
4825          * Get the nvram lock before halting the cpu.
4826          */
4827         lock_err = tg3_nvram_lock(tp);
4828         err = tg3_halt_cpu(tp, cpu_base);
4829         if (!lock_err)
4830                 tg3_nvram_unlock(tp);
4831         if (err)
4832                 goto out;
4833
4834         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4835                 write_op(tp, cpu_scratch_base + i, 0);
4836         tw32(cpu_base + CPU_STATE, 0xffffffff);
4837         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4838         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4839                 write_op(tp, (cpu_scratch_base +
4840                               (info->text_base & 0xffff) +
4841                               (i * sizeof(u32))),
4842                          (info->text_data ?
4843                           info->text_data[i] : 0));
4844         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4845                 write_op(tp, (cpu_scratch_base +
4846                               (info->rodata_base & 0xffff) +
4847                               (i * sizeof(u32))),
4848                          (info->rodata_data ?
4849                           info->rodata_data[i] : 0));
4850         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4851                 write_op(tp, (cpu_scratch_base +
4852                               (info->data_base & 0xffff) +
4853                               (i * sizeof(u32))),
4854                          (info->data_data ?
4855                           info->data_data[i] : 0));
4856
4857         err = 0;
4858
4859 out:
4860         return err;
4861 }
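
/* Address-mapping note (illustrative arithmetic only): write_op() above
 * places a firmware word linked at address A into on-chip scratch memory
 * at cpu_scratch_base + (A & 0xffff).  For the 5701_A0 image defined
 * earlier, a word at TG3_FW_DATA_ADDR (0x08000a40) therefore lands at
 * RX_CPU_SCRATCH_BASE + 0x0a40 = 0x30a40.
 */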
4862
4863 /* tp->lock is held. */
4864 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4865 {
4866         struct fw_info info;
4867         int err, i;
4868
4869         info.text_base = TG3_FW_TEXT_ADDR;
4870         info.text_len = TG3_FW_TEXT_LEN;
4871         info.text_data = &tg3FwText[0];
4872         info.rodata_base = TG3_FW_RODATA_ADDR;
4873         info.rodata_len = TG3_FW_RODATA_LEN;
4874         info.rodata_data = &tg3FwRodata[0];
4875         info.data_base = TG3_FW_DATA_ADDR;
4876         info.data_len = TG3_FW_DATA_LEN;
4877         info.data_data = NULL;
4878
4879         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4880                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4881                                     &info);
4882         if (err)
4883                 return err;
4884
4885         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4886                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4887                                     &info);
4888         if (err)
4889                 return err;
4890
4891         /* Now start up only the RX cpu. */
4892         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4893         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4894
4895         for (i = 0; i < 5; i++) {
4896                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4897                         break;
4898                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4899                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4900                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4901                 udelay(1000);
4902         }
4903         if (i >= 5) {
4904                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4905                        "to set RX CPU PC, is %08x should be %08x\n",
4906                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4907                        TG3_FW_TEXT_ADDR);
4908                 return -ENODEV;
4909         }
4910         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4911         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4912
4913         return 0;
4914 }
4915
4916 #if TG3_TSO_SUPPORT != 0
4917
4918 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4919 #define TG3_TSO_FW_RELASE_MINOR         0x6
4920 #define TG3_TSO_FW_RELEASE_FIX          0x0
4921 #define TG3_TSO_FW_START_ADDR           0x08000000
4922 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4923 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4924 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4925 #define TG3_TSO_FW_RODATA_LEN           0x60
4926 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4927 #define TG3_TSO_FW_DATA_LEN             0x30
4928 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4929 #define TG3_TSO_FW_SBSS_LEN             0x2c
4930 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4931 #define TG3_TSO_FW_BSS_LEN              0x894
4932
4933 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4934         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4935         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4936         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4937         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4938         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4939         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4940         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4941         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4942         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4943         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4944         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4945         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4946         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4947         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4948         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4949         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4950         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4951         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4952         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4953         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4954         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4955         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4956         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4957         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4958         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4959         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4960         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4961         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4962         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4963         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4964         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4965         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4966         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4967         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4968         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4969         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4970         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4971         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4972         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4973         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4974         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4975         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4976         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4977         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4978         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4979         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4980         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4981         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4982         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4983         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4984         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4985         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4986         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4987         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4988         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4989         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4990         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4991         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4992         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4993         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4994         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4995         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4996         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4997         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4998         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4999         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5000         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5001         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5002         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5003         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5004         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5005         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5006         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5007         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5008         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5009         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5010         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5011         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5012         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5013         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5014         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5015         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5016         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5017         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5018         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5019         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5020         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5021         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5022         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5023         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5024         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5025         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5026         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5027         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5028         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5029         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5030         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5031         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5032         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5033         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5034         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5035         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5036         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5037         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5038         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5039         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5040         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5041         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5042         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5043         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5044         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5045         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5046         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5047         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5048         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5049         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5050         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5051         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5052         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5053         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5054         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5055         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5056         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5057         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5058         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5059         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5060         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5061         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5062         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5063         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5064         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5065         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5066         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5067         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5068         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5069         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5070         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5071         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5072         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5073         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5074         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5075         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5076         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5077         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5078         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5079         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5080         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5081         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5082         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5083         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5084         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5085         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5086         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5087         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5088         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5089         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5090         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5091         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5092         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5093         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5094         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5095         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5096         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5097         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5098         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5099         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5100         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5101         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5102         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5103         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5104         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5105         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5106         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5107         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5108         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5109         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5110         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5111         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5112         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5113         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5114         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5115         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5116         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5117         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5118         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5119         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5120         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5121         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5122         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5123         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5124         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5125         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5126         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5127         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5128         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5129         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5130         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5131         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5132         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5133         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5134         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5135         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5136         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5137         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5138         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5139         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5140         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5141         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5142         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5143         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5144         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5145         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5146         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5147         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5148         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5149         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5150         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5151         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5152         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5153         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5154         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5155         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5156         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5157         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5158         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5159         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5160         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5161         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5162         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5163         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5164         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5165         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5166         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5167         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5168         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5169         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5170         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5171         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5172         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5173         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5174         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5175         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5176         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5177         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5178         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5179         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5180         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5181         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5182         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5183         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5184         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5185         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5186         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5187         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5188         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5189         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5190         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5191         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5192         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5193         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5194         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5195         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5196         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5197         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5198         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5199         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5200         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5201         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5202         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5203         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5204         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5205         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5206         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5207         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5208         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5209         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5210         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5211         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5212         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5213         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5214         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5215         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5216         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5217         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5218 };
5219
5220 static u32 tg3TsoFwRodata[] = {
5221         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5222         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5223         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5224         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5225         0x00000000,
5226 };
5227
5228 static u32 tg3TsoFwData[] = {
5229         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5230         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5231         0x00000000,
5232 };
5233
5234 /* 5705 needs a special version of the TSO firmware.  */
5235 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5236 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5237 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5238 #define TG3_TSO5_FW_START_ADDR          0x00010000
5239 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5240 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5241 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5242 #define TG3_TSO5_FW_RODATA_LEN          0x50
5243 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5244 #define TG3_TSO5_FW_DATA_LEN            0x20
5245 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5246 #define TG3_TSO5_FW_SBSS_LEN            0x28
5247 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5248 #define TG3_TSO5_FW_BSS_LEN             0x88
5249
5250 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5251         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5252         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5253         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5254         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5255         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5256         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5257         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5258         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5259         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5260         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5261         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5262         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5263         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5264         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5265         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5266         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5267         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5268         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5269         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5270         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5271         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5272         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5273         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5274         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5275         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5276         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5277         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5278         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5279         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5280         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5281         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5282         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5283         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5284         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5285         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5286         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5287         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5288         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5289         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5290         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5291         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5292         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5293         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5294         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5295         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5296         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5297         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5298         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5299         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5300         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5301         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5302         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5303         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5304         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5305         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5306         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5307         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5308         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5309         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5310         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5311         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5312         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5313         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5314         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5315         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5316         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5317         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5318         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5319         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5320         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5321         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5322         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5323         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5324         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5325         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5326         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5327         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5328         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5329         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5330         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5331         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5332         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5333         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5334         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5335         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5336         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5337         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5338         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5339         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5340         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5341         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5342         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5343         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5344         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5345         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5346         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5347         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5348         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5349         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5350         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5351         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5352         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5353         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5354         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5355         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5356         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5357         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5358         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5359         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5360         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5361         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5362         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5363         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5364         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5365         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5366         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5367         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5368         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5369         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5370         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5371         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5372         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5373         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5374         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5375         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5376         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5377         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5378         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5379         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5380         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5381         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5382         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5383         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5384         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5385         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5386         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5387         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5388         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5389         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5390         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5391         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5392         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5393         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5394         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5395         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5396         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5397         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5398         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5399         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5400         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5401         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5402         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5403         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5404         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5405         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5406         0x00000000, 0x00000000, 0x00000000,
5407 };
5408
5409 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5410         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5411         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5412         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5413         0x00000000, 0x00000000, 0x00000000,
5414 };
5415
5416 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5417         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5418         0x00000000, 0x00000000, 0x00000000,
5419 };
5420
5421 /* tp->lock is held. */
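/* Select the 5705-specific or standard TSO firmware image, load it with
 * tg3_load_firmware_cpu(), then start the target CPU at the image's text base.
 */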
5422 static int tg3_load_tso_firmware(struct tg3 *tp)
5423 {
5424         struct fw_info info;
5425         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5426         int err, i;
5427
5428         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5429                 return 0;
5430
5431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5432                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5433                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5434                 info.text_data = &tg3Tso5FwText[0];
5435                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5436                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5437                 info.rodata_data = &tg3Tso5FwRodata[0];
5438                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5439                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5440                 info.data_data = &tg3Tso5FwData[0];
5441                 cpu_base = RX_CPU_BASE;
5442                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5443                 cpu_scratch_size = (info.text_len +
5444                                     info.rodata_len +
5445                                     info.data_len +
5446                                     TG3_TSO5_FW_SBSS_LEN +
5447                                     TG3_TSO5_FW_BSS_LEN);
5448         } else {
5449                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5450                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5451                 info.text_data = &tg3TsoFwText[0];
5452                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5453                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5454                 info.rodata_data = &tg3TsoFwRodata[0];
5455                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5456                 info.data_len = TG3_TSO_FW_DATA_LEN;
5457                 info.data_data = &tg3TsoFwData[0];
5458                 cpu_base = TX_CPU_BASE;
5459                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5460                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5461         }
5462
5463         err = tg3_load_firmware_cpu(tp, cpu_base,
5464                                     cpu_scratch_base, cpu_scratch_size,
5465                                     &info);
5466         if (err)
5467                 return err;
5468
5469         /* Now start up the CPU. */
5470         tw32(cpu_base + CPU_STATE, 0xffffffff);
5471         tw32_f(cpu_base + CPU_PC,    info.text_base);
5472
5473         for (i = 0; i < 5; i++) {
5474                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5475                         break;
5476                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5477                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5478                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5479                 udelay(1000);
5480         }
5481         if (i >= 5) {
5482                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5483                        "to set CPU PC, is %08x should be %08x\n",
5484                        tp->dev->name, tr32(cpu_base + CPU_PC),
5485                        info.text_base);
5486                 return -ENODEV;
5487         }
5488         tw32(cpu_base + CPU_STATE, 0xffffffff);
5489         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5490         return 0;
5491 }
5492
5493 #endif /* TG3_TSO_SUPPORT != 0 */
5494
5495 /* tp->lock is held. */
5496 static void __tg3_set_mac_addr(struct tg3 *tp)
5497 {
5498         u32 addr_high, addr_low;
5499         int i;
5500
5501         addr_high = ((tp->dev->dev_addr[0] << 8) |
5502                      tp->dev->dev_addr[1]);
5503         addr_low = ((tp->dev->dev_addr[2] << 24) |
5504                     (tp->dev->dev_addr[3] << 16) |
5505                     (tp->dev->dev_addr[4] <<  8) |
5506                     (tp->dev->dev_addr[5] <<  0));
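        /* Program the station address into all four MAC address register pairs. */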
5507         for (i = 0; i < 4; i++) {
5508                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5509                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5510         }
5511
5512         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5513             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5514                 for (i = 0; i < 12; i++) {
5515                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5516                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5517                 }
5518         }
5519
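        /* Derive the transmit backoff seed from the sum of the station address bytes. */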
5520         addr_high = (tp->dev->dev_addr[0] +
5521                      tp->dev->dev_addr[1] +
5522                      tp->dev->dev_addr[2] +
5523                      tp->dev->dev_addr[3] +
5524                      tp->dev->dev_addr[4] +
5525                      tp->dev->dev_addr[5]) &
5526                 TX_BACKOFF_SEED_MASK;
5527         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5528 }
5529
5530 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5531 {
5532         struct tg3 *tp = netdev_priv(dev);
5533         struct sockaddr *addr = p;
5534
5535         if (!is_valid_ether_addr(addr->sa_data))
5536                 return -EINVAL;
5537
5538         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5539
5540         spin_lock_bh(&tp->lock);
5541         __tg3_set_mac_addr(tp);
5542         spin_unlock_bh(&tp->lock);
5543
5544         return 0;
5545 }
5546
5547 /* tp->lock is held. */
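/* Write a TG3_BDINFO block (host DMA address, maxlen/flags and, on chips
 * before the 5705, the NIC-side ring address) into chip memory.
 */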
5548 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5549                            dma_addr_t mapping, u32 maxlen_flags,
5550                            u32 nic_addr)
5551 {
5552         tg3_write_mem(tp,
5553                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5554                       ((u64) mapping >> 32));
5555         tg3_write_mem(tp,
5556                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5557                       ((u64) mapping & 0xffffffff));
5558         tg3_write_mem(tp,
5559                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5560                        maxlen_flags);
5561
5562         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5563                 tg3_write_mem(tp,
5564                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5565                               nic_addr);
5566 }
5567
5568 static void __tg3_set_rx_mode(struct net_device *);
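/* Program the host coalescing engine from the given ethtool parameters. */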
5569 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5570 {
5571         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5572         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5573         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5574         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5575         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5576                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5577                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5578         }
5579         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5580         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5581         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5582                 u32 val = ec->stats_block_coalesce_usecs;
5583
5584                 if (!netif_carrier_ok(tp->dev))
5585                         val = 0;
5586
5587                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5588         }
5589 }
5590
5591 /* tp->lock is held. */
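/* Bring the chip from reset to a fully programmed state: rings, buffer
 * manager, host coalescing, DMA engines, MAC and receive rules.
 */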
5592 static int tg3_reset_hw(struct tg3 *tp)
5593 {
5594         u32 val, rdmac_mode;
5595         int i, err, limit;
5596
5597         tg3_disable_ints(tp);
5598
5599         tg3_stop_fw(tp);
5600
5601         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5602
5603         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5604                 tg3_abort_hw(tp, 1);
5605         }
5606
5607         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5608                 tg3_phy_reset(tp);
5609
5610         err = tg3_chip_reset(tp);
5611         if (err)
5612                 return err;
5613
5614         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5615
5616         /* This works around an issue with Athlon chipsets on
5617          * B3 tigon3 silicon.  This bit has no effect on any
5618          * other revision.  But do not set this on PCI Express
5619          * chips.
5620          */
5621         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5622                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5623         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5624
5625         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5626             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5627                 val = tr32(TG3PCI_PCISTATE);
5628                 val |= PCISTATE_RETRY_SAME_DMA;
5629                 tw32(TG3PCI_PCISTATE, val);
5630         }
5631
5632         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5633                 /* Enable some hw fixes.  */
5634                 val = tr32(TG3PCI_MSI_DATA);
5635                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5636                 tw32(TG3PCI_MSI_DATA, val);
5637         }
5638
5639         /* Descriptor ring init may make accesses to the
5640          * NIC SRAM area to setup the TX descriptors, so we
5641          * can only do this after the hardware has been
5642          * successfully reset.
5643          */
5644         tg3_init_rings(tp);
5645
5646         /* This value is determined during the probe time DMA
5647          * engine test, tg3_test_dma.
5648          */
5649         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5650
5651         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5652                           GRC_MODE_4X_NIC_SEND_RINGS |
5653                           GRC_MODE_NO_TX_PHDR_CSUM |
5654                           GRC_MODE_NO_RX_PHDR_CSUM);
5655         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5656         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5657                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5658         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5659                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5660
5661         tw32(GRC_MODE,
5662              tp->grc_mode |
5663              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5664
5665         /* Set up the timer prescaler register.  Clock is always 66 MHz; 65 + 1 divides it down to a 1 usec tick. */
5666         val = tr32(GRC_MISC_CFG);
5667         val &= ~0xff;
5668         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5669         tw32(GRC_MISC_CFG, val);
5670
5671         /* Initialize MBUF/DESC pool. */
5672         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5673                 /* Do nothing.  */
5674         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5675                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5676                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5677                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5678                 else
5679                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5680                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5681                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5682         }
5683 #if TG3_TSO_SUPPORT != 0
5684         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5685                 int fw_len;
5686
5687                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5688                           TG3_TSO5_FW_RODATA_LEN +
5689                           TG3_TSO5_FW_DATA_LEN +
5690                           TG3_TSO5_FW_SBSS_LEN +
5691                           TG3_TSO5_FW_BSS_LEN);
5692                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5693                 tw32(BUFMGR_MB_POOL_ADDR,
5694                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5695                 tw32(BUFMGR_MB_POOL_SIZE,
5696                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5697         }
5698 #endif
5699
5700         if (tp->dev->mtu <= ETH_DATA_LEN) {
5701                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5702                      tp->bufmgr_config.mbuf_read_dma_low_water);
5703                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5704                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5705                 tw32(BUFMGR_MB_HIGH_WATER,
5706                      tp->bufmgr_config.mbuf_high_water);
5707         } else {
5708                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5709                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5710                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5711                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5712                 tw32(BUFMGR_MB_HIGH_WATER,
5713                      tp->bufmgr_config.mbuf_high_water_jumbo);
5714         }
5715         tw32(BUFMGR_DMA_LOW_WATER,
5716              tp->bufmgr_config.dma_low_water);
5717         tw32(BUFMGR_DMA_HIGH_WATER,
5718              tp->bufmgr_config.dma_high_water);
5719
5720         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5721         for (i = 0; i < 2000; i++) {
5722                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5723                         break;
5724                 udelay(10);
5725         }
5726         if (i >= 2000) {
5727                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5728                        tp->dev->name);
5729                 return -ENODEV;
5730         }
5731
5732         /* Setup replenish threshold. */
5733         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5734
5735         /* Initialize TG3_BDINFO's at:
5736          *  RCVDBDI_STD_BD:     standard eth size rx ring
5737          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5738          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5739          *
5740          * like so:
5741          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5742          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5743          *                              ring attribute flags
5744          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5745          *
5746          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5747          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5748          *
5749          * The size of each ring is fixed in the firmware, but the location is
5750          * configurable.
5751          */
5752         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5753              ((u64) tp->rx_std_mapping >> 32));
5754         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5755              ((u64) tp->rx_std_mapping & 0xffffffff));
5756         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5757              NIC_SRAM_RX_BUFFER_DESC);
5758
5759         /* Don't even try to program the JUMBO/MINI buffer descriptor
5760          * configs on 5705.
5761          */
5762         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5763                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5764                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5765         } else {
5766                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5767                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5768
5769                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5770                      BDINFO_FLAGS_DISABLED);
5771
5772                 /* Setup replenish threshold. */
5773                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5774
5775                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5776                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5777                              ((u64) tp->rx_jumbo_mapping >> 32));
5778                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5779                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5780                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5781                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5782                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5783                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5784                 } else {
5785                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5786                              BDINFO_FLAGS_DISABLED);
5787                 }
5788
5789         }
5790
5791         /* There is only one send ring on 5705/5750, no need to explicitly
5792          * disable the others.
5793          */
5794         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5795                 /* Clear out send RCB ring in SRAM. */
5796                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5797                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5798                                       BDINFO_FLAGS_DISABLED);
5799         }
5800
5801         tp->tx_prod = 0;
5802         tp->tx_cons = 0;
5803         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5804         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5805
5806         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5807                        tp->tx_desc_mapping,
5808                        (TG3_TX_RING_SIZE <<
5809                         BDINFO_FLAGS_MAXLEN_SHIFT),
5810                        NIC_SRAM_TX_BUFFER_DESC);
5811
5812         /* There is only one receive return ring on 5705/5750, no need
5813          * to explicitly disable the others.
5814          */
5815         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5816                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5817                      i += TG3_BDINFO_SIZE) {
5818                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5819                                       BDINFO_FLAGS_DISABLED);
5820                 }
5821         }
5822
5823         tp->rx_rcb_ptr = 0;
5824         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5825
5826         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5827                        tp->rx_rcb_mapping,
5828                        (TG3_RX_RCB_RING_SIZE(tp) <<
5829                         BDINFO_FLAGS_MAXLEN_SHIFT),
5830                        0);
5831
5832         tp->rx_std_ptr = tp->rx_pending;
5833         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5834                      tp->rx_std_ptr);
5835
5836         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5837                                                 tp->rx_jumbo_pending : 0;
5838         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5839                      tp->rx_jumbo_ptr);
5840
5841         /* Initialize MAC address and backoff seed. */
5842         __tg3_set_mac_addr(tp);
5843
5844         /* MTU + ethernet header (ETH_HLEN) + 4 byte FCS + 4 byte optional VLAN tag */
5845         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5846
5847         /* The slot time is changed by tg3_setup_phy if we
5848          * run at gigabit with half duplex.
5849          */
5850         tw32(MAC_TX_LENGTHS,
5851              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5852              (6 << TX_LENGTHS_IPG_SHIFT) |
5853              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5854
5855         /* Receive rules. */
5856         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5857         tw32(RCVLPC_CONFIG, 0x0181);
5858
5859         /* Calculate RDMAC_MODE setting early, we need it to determine
5860          * the RCVLPC_STATE_ENABLE mask.
5861          */
5862         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5863                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5864                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5865                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5866                       RDMAC_MODE_LNGREAD_ENAB);
5867         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5868                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5869
5870         /* If statement applies to 5705 and 5750 PCI devices only */
5871         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5872              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5873             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5874                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5875                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5876                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5877                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5878                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5879                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5880                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5881                 }
5882         }
5883
5884         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5885                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5886
5887 #if TG3_TSO_SUPPORT != 0
5888         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5889                 rdmac_mode |= (1 << 27);
5890 #endif
5891
5892         /* Receive/send statistics. */
5893         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5894             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5895                 val = tr32(RCVLPC_STATS_ENABLE);
5896                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5897                 tw32(RCVLPC_STATS_ENABLE, val);
5898         } else {
5899                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5900         }
5901         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5902         tw32(SNDDATAI_STATSENAB, 0xffffff);
5903         tw32(SNDDATAI_STATSCTRL,
5904              (SNDDATAI_SCTRL_ENABLE |
5905               SNDDATAI_SCTRL_FASTUPD));
5906
5907         /* Setup host coalescing engine. */
5908         tw32(HOSTCC_MODE, 0);
5909         for (i = 0; i < 2000; i++) {
5910                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5911                         break;
5912                 udelay(10);
5913         }
5914
5915         __tg3_set_coalesce(tp, &tp->coal);
5916
5917         /* set status block DMA address */
5918         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5919              ((u64) tp->status_mapping >> 32));
5920         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5921              ((u64) tp->status_mapping & 0xffffffff));
5922
5923         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5924                 /* Status/statistics block address.  See tg3_timer,
5925                  * the tg3_periodic_fetch_stats call there, and
5926                  * tg3_get_stats to see how this works for 5705/5750 chips.
5927                  */
5928                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5929                      ((u64) tp->stats_mapping >> 32));
5930                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5931                      ((u64) tp->stats_mapping & 0xffffffff));
5932                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5933                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5934         }
5935
5936         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5937
5938         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5939         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5940         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5941                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5942
5943         /* Clear statistics/status block in chip, and status block in ram. */
5944         for (i = NIC_SRAM_STATS_BLK;
5945              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5946              i += sizeof(u32)) {
5947                 tg3_write_mem(tp, i, 0);
5948                 udelay(40);
5949         }
5950         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5951
5952         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5953                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5954                 /* reset to prevent losing 1st rx packet intermittently */
5955                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5956                 udelay(10);
5957         }
5958
5959         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5960                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5961         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5962         udelay(40);
5963
5964         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5965          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5966          * register to preserve the GPIO settings for LOMs. The GPIOs,
5967          * whether used as inputs or outputs, are set by boot code after
5968          * reset.
5969          */
5970         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5971                 u32 gpio_mask;
5972
5973                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5974                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5975
5976                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5977                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5978                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5979
5980                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5981
5982                 /* GPIO1 must be driven high for eeprom write protect */
5983                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5984                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5985         }
5986         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5987         udelay(100);
5988
5989         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5990         tp->last_tag = 0;
5991
5992         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5993                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5994                 udelay(40);
5995         }
5996
5997         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5998                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5999                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6000                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6001                WDMAC_MODE_LNGREAD_ENAB);
6002
6003         /* If statement applies to 5705 and 5750 PCI devices only */
6004         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6005              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6006             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6007                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6008                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6009                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6010                         /* nothing */
6011                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6012                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6013                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6014                         val |= WDMAC_MODE_RX_ACCEL;
6015                 }
6016         }
6017
6018         tw32_f(WDMAC_MODE, val);
6019         udelay(40);
6020
6021         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6022                 val = tr32(TG3PCI_X_CAPS);
6023                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6024                         val &= ~PCIX_CAPS_BURST_MASK;
6025                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6026                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6027                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6028                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6029                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6030                                 val |= (tp->split_mode_max_reqs <<
6031                                         PCIX_CAPS_SPLIT_SHIFT);
6032                 }
6033                 tw32(TG3PCI_X_CAPS, val);
6034         }
6035
6036         tw32_f(RDMAC_MODE, rdmac_mode);
6037         udelay(40);
6038
6039         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6040         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6041                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6042         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6043         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6044         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6045         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6046         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6047 #if TG3_TSO_SUPPORT != 0
6048         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6049                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6050 #endif
6051         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6052         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6053
6054         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6055                 err = tg3_load_5701_a0_firmware_fix(tp);
6056                 if (err)
6057                         return err;
6058         }
6059
6060 #if TG3_TSO_SUPPORT != 0
6061         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6062                 err = tg3_load_tso_firmware(tp);
6063                 if (err)
6064                         return err;
6065         }
6066 #endif
6067
6068         tp->tx_mode = TX_MODE_ENABLE;
6069         tw32_f(MAC_TX_MODE, tp->tx_mode);
6070         udelay(100);
6071
6072         tp->rx_mode = RX_MODE_ENABLE;
6073         tw32_f(MAC_RX_MODE, tp->rx_mode);
6074         udelay(10);
6075
6076         if (tp->link_config.phy_is_low_power) {
6077                 tp->link_config.phy_is_low_power = 0;
6078                 tp->link_config.speed = tp->link_config.orig_speed;
6079                 tp->link_config.duplex = tp->link_config.orig_duplex;
6080                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6081         }
6082
6083         tp->mi_mode = MAC_MI_MODE_BASE;
6084         tw32_f(MAC_MI_MODE, tp->mi_mode);
6085         udelay(80);
6086
6087         tw32(MAC_LED_CTRL, tp->led_ctrl);
6088
6089         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6090         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6091                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6092                 udelay(10);
6093         }
6094         tw32_f(MAC_RX_MODE, tp->rx_mode);
6095         udelay(10);
6096
6097         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6098                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6099                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6100                         /* Set drive transmission level to 1.2V  */
6101                         /* only if the signal pre-emphasis bit is not set  */
6102                         val = tr32(MAC_SERDES_CFG);
6103                         val &= 0xfffff000;
6104                         val |= 0x880;
6105                         tw32(MAC_SERDES_CFG, val);
6106                 }
6107                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6108                         tw32(MAC_SERDES_CFG, 0x616000);
6109         }
6110
6111         /* Prevent chip from dropping frames when flow control
6112          * is enabled.
6113          */
6114         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6115
6116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6117             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6118                 /* Use hardware link auto-negotiation */
6119                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6120         }
6121
6122         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6123             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6124                 u32 tmp;
6125
6126                 tmp = tr32(SERDES_RX_CTRL);
6127                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6128                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6129                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6130                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6131         }
6132
6133         err = tg3_setup_phy(tp, 1);
6134         if (err)
6135                 return err;
6136
6137         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6138                 u32 tmp;
6139
6140                 /* Clear CRC stats. */
6141                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6142                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6143                         tg3_readphy(tp, 0x14, &tmp);
6144                 }
6145         }
6146
6147         __tg3_set_rx_mode(tp->dev);
6148
6149         /* Initialize receive rules. */
6150         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6151         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6152         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6153         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6154
6155         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6156             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6157                 limit = 8;
6158         else
6159                 limit = 16;
6160         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6161                 limit -= 4;
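        /* The cases below fall through on purpose, clearing every receive
         * rule from limit - 1 down to rule 4.
         */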
6162         switch (limit) {
6163         case 16:
6164                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6165         case 15:
6166                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6167         case 14:
6168                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6169         case 13:
6170                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6171         case 12:
6172                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6173         case 11:
6174                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6175         case 10:
6176                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6177         case 9:
6178                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6179         case 8:
6180                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6181         case 7:
6182                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6183         case 6:
6184                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6185         case 5:
6186                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6187         case 4:
6188                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6189         case 3:
6190                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6191         case 2:
6192         case 1:
6193
6194         default:
6195                 break;
6196         }
6197
6198         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6199
6200         return 0;
6201 }
6202
6203 /* Called at device open time to get the chip ready for
6204  * packet processing.  Invoked with tp->lock held.
6205  */
6206 static int tg3_init_hw(struct tg3 *tp)
6207 {
6208         int err;
6209
6210         /* Force the chip into D0. */
6211         err = tg3_set_power_state(tp, PCI_D0);
6212         if (err)
6213                 goto out;
6214
6215         tg3_switch_clocks(tp);
6216
6217         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6218
6219         err = tg3_reset_hw(tp);
6220
6221 out:
6222         return err;
6223 }
6224
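/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) statistic,
 * carrying into the high word when the low-word addition wraps.
 */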
6225 #define TG3_STAT_ADD32(PSTAT, REG) \
6226 do {    u32 __val = tr32(REG); \
6227         (PSTAT)->low += __val; \
6228         if ((PSTAT)->low < __val) \
6229                 (PSTAT)->high += 1; \
6230 } while (0)
6231
6232 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6233 {
6234         struct tg3_hw_stats *sp = tp->hw_stats;
6235
6236         if (!netif_carrier_ok(tp->dev))
6237                 return;
6238
6239         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6240         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6241         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6242         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6243         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6244         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6245         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6246         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6247         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6248         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6249         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6250         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6251         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6252
6253         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6254         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6255         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6256         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6257         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6258         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6259         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6260         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6261         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6262         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6263         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6264         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6265         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6266         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6267 }
6268
6269 static void tg3_timer(unsigned long __opaque)
6270 {
6271         struct tg3 *tp = (struct tg3 *) __opaque;
6272
6273         spin_lock(&tp->lock);
6274
6275         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6276                 /* All of this is needed because, with non-tagged
6277                  * IRQ status, the mailbox/status_block protocol the
6278                  * chip uses with the CPU is race prone.
6279                  */
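                     /* Work around it here: if the status block shows an
                      * update that was never serviced, force a new interrupt
                      * via GRC_LCLCTRL_SETINT; otherwise kick host coalescing
                      * (HOSTCC_MODE_NOW) so a fresh status block is DMA'd.
                      * If the write DMA engine has stopped, the chip is
                      * wedged and a full reset is scheduled instead.
                      */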
6280                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6281                         tw32(GRC_LOCAL_CTRL,
6282                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6283                 } else {
6284                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6285                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6286                 }
6287
6288                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6289                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6290                         spin_unlock(&tp->lock);
6291                         schedule_work(&tp->reset_task);
6292                         return;
6293                 }
6294         }
6295
6296         /* This part only runs once per second. */
6297         if (!--tp->timer_counter) {
6298                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6299                         tg3_periodic_fetch_stats(tp);
6300
6301                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6302                         u32 mac_stat;
6303                         int phy_event;
6304
6305                         mac_stat = tr32(MAC_STATUS);
6306
6307                         phy_event = 0;
6308                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6309                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6310                                         phy_event = 1;
6311                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6312                                 phy_event = 1;
6313
6314                         if (phy_event)
6315                                 tg3_setup_phy(tp, 0);
6316                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6317                         u32 mac_stat = tr32(MAC_STATUS);
6318                         int need_setup = 0;
6319
6320                         if (netif_carrier_ok(tp->dev) &&
6321                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6322                                 need_setup = 1;
6323                         }
6324                         if (! netif_carrier_ok(tp->dev) &&
6325                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6326                                          MAC_STATUS_SIGNAL_DET))) {
6327                                 need_setup = 1;
6328                         }
6329                         if (need_setup) {
6330                                 tw32_f(MAC_MODE,
6331                                      (tp->mac_mode &
6332                                       ~MAC_MODE_PORT_MODE_MASK));
6333                                 udelay(40);
6334                                 tw32_f(MAC_MODE, tp->mac_mode);
6335                                 udelay(40);
6336                                 tg3_setup_phy(tp, 0);
6337                         }
6338                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6339                         tg3_serdes_parallel_detect(tp);
6340
6341                 tp->timer_counter = tp->timer_multiplier;
6342         }
6343
6344         /* Heartbeat is only sent once every 2 seconds.  */
6345         if (!--tp->asf_counter) {
6346                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6347                         u32 val;
6348
6349                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6350                                            FWCMD_NICDRV_ALIVE2);
6351                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6352                         /* 5 seconds timeout */
6353                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6354                         val = tr32(GRC_RX_CPU_EVENT);
6355                         val |= (1 << 14);
6356                         tw32(GRC_RX_CPU_EVENT, val);
6357                 }
6358                 tp->asf_counter = tp->asf_multiplier;
6359         }
6360
6361         spin_unlock(&tp->lock);
6362
6363         tp->timer.expires = jiffies + tp->timer_offset;
6364         add_timer(&tp->timer);
6365 }
6366
6367 static int tg3_test_interrupt(struct tg3 *tp)
6368 {
6369         struct net_device *dev = tp->dev;
6370         int err, i;
6371         u32 int_mbox = 0;
6372
6373         if (!netif_running(dev))
6374                 return -ENODEV;
6375
6376         tg3_disable_ints(tp);
6377
6378         free_irq(tp->pdev->irq, dev);
6379
6380         err = request_irq(tp->pdev->irq, tg3_test_isr,
6381                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6382         if (err)
6383                 return err;
6384
6385         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6386         tg3_enable_ints(tp);
6387
6388         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6389                HOSTCC_MODE_NOW);
6390
6391         for (i = 0; i < 5; i++) {
6392                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6393                                         TG3_64BIT_REG_LOW);
6394                 if (int_mbox != 0)
6395                         break;
6396                 msleep(10);
6397         }
6398
6399         tg3_disable_ints(tp);
6400
6401         free_irq(tp->pdev->irq, dev);
6402         
6403         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6404                 err = request_irq(tp->pdev->irq, tg3_msi,
6405                                   SA_SAMPLE_RANDOM, dev->name, dev);
6406         else {
6407                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6408                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6409                         fn = tg3_interrupt_tagged;
6410                 err = request_irq(tp->pdev->irq, fn,
6411                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6412         }
6413
6414         if (err)
6415                 return err;
6416
6417         if (int_mbox != 0)
6418                 return 0;
6419
6420         return -EIO;
6421 }
6422
6423 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6424  * INTx mode is successfully restored.
6425  */
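     /* The test re-registers the vector with tg3_test_isr(), forces an
      * interrupt by kicking host coalescing, and polls the interrupt
      * mailbox for roughly 50ms.  If the mailbox never changes,
      * tg3_test_interrupt() returns -EIO and the driver falls back to
      * legacy INTx, resetting the chip in case the failed MSI ended in
      * a Master Abort.
      */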
6426 static int tg3_test_msi(struct tg3 *tp)
6427 {
6428         struct net_device *dev = tp->dev;
6429         int err;
6430         u16 pci_cmd;
6431
6432         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6433                 return 0;
6434
6435         /* Turn off SERR reporting in case MSI terminates with Master
6436          * Abort.
6437          */
6438         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6439         pci_write_config_word(tp->pdev, PCI_COMMAND,
6440                               pci_cmd & ~PCI_COMMAND_SERR);
6441
6442         err = tg3_test_interrupt(tp);
6443
6444         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6445
6446         if (!err)
6447                 return 0;
6448
6449         /* other failures */
6450         if (err != -EIO)
6451                 return err;
6452
6453         /* MSI test failed, go back to INTx mode */
6454         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6455                "switching to INTx mode. Please report this failure to "
6456                "the PCI maintainer and include system chipset information.\n",
6457                        tp->dev->name);
6458
6459         free_irq(tp->pdev->irq, dev);
6460         pci_disable_msi(tp->pdev);
6461
6462         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6463
6464         {
6465                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6466                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6467                         fn = tg3_interrupt_tagged;
6468
6469                 err = request_irq(tp->pdev->irq, fn,
6470                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6471         }
6472         if (err)
6473                 return err;
6474
6475         /* Need to reset the chip because the MSI cycle may have terminated
6476          * with Master Abort.
6477          */
6478         tg3_full_lock(tp, 1);
6479
6480         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6481         err = tg3_init_hw(tp);
6482
6483         tg3_full_unlock(tp);
6484
6485         if (err)
6486                 free_irq(tp->pdev->irq, dev);
6487
6488         return err;
6489 }
6490
6491 static int tg3_open(struct net_device *dev)
6492 {
6493         struct tg3 *tp = netdev_priv(dev);
6494         int err;
6495
6496         tg3_full_lock(tp, 0);
6497
6498         err = tg3_set_power_state(tp, PCI_D0);
6499         if (err) {
                     tg3_full_unlock(tp);
6500                 return err;
             }
6501
6502         tg3_disable_ints(tp);
6503         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6504
6505         tg3_full_unlock(tp);
6506
6507         /* The placement of this call is tied
6508          * to the setup and use of Host TX descriptors.
6509          */
6510         err = tg3_alloc_consistent(tp);
6511         if (err)
6512                 return err;
6513
6514         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6515             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6516             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6517             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6518               (tp->pdev_peer == tp->pdev))) {
6519                 /* All MSI supporting chips should support tagged
6520                  * status.  Assert that this is the case.
6521                  */
6522                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6523                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6524                                "Not using MSI.\n", tp->dev->name);
6525                 } else if (pci_enable_msi(tp->pdev) == 0) {
6526                         u32 msi_mode;
6527
6528                         msi_mode = tr32(MSGINT_MODE);
6529                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6530                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6531                 }
6532         }
6533         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6534                 err = request_irq(tp->pdev->irq, tg3_msi,
6535                                   SA_SAMPLE_RANDOM, dev->name, dev);
6536         else {
6537                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6538                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6539                         fn = tg3_interrupt_tagged;
6540
6541                 err = request_irq(tp->pdev->irq, fn,
6542                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6543         }
6544
6545         if (err) {
6546                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6547                         pci_disable_msi(tp->pdev);
6548                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6549                 }
6550                 tg3_free_consistent(tp);
6551                 return err;
6552         }
6553
6554         tg3_full_lock(tp, 0);
6555
6556         err = tg3_init_hw(tp);
6557         if (err) {
6558                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6559                 tg3_free_rings(tp);
6560         } else {
6561                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6562                         tp->timer_offset = HZ;
6563                 else
6564                         tp->timer_offset = HZ / 10;
6565
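                     /* With tagged status the timer ticks once per second
                      * (timer_counter == 1); otherwise it ticks at 10 Hz
                      * with timer_counter == 10, so the once-per-second
                      * work in tg3_timer() still runs at 1 Hz.  The ASF
                      * heartbeat counter is twice that, i.e. every 2 seconds.
                      */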
6566                 BUG_ON(tp->timer_offset > HZ);
6567                 tp->timer_counter = tp->timer_multiplier =
6568                         (HZ / tp->timer_offset);
6569                 tp->asf_counter = tp->asf_multiplier =
6570                         ((HZ / tp->timer_offset) * 2);
6571
6572                 init_timer(&tp->timer);
6573                 tp->timer.expires = jiffies + tp->timer_offset;
6574                 tp->timer.data = (unsigned long) tp;
6575                 tp->timer.function = tg3_timer;
6576         }
6577
6578         tg3_full_unlock(tp);
6579
6580         if (err) {
6581                 free_irq(tp->pdev->irq, dev);
6582                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6583                         pci_disable_msi(tp->pdev);
6584                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6585                 }
6586                 tg3_free_consistent(tp);
6587                 return err;
6588         }
6589
6590         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6591                 err = tg3_test_msi(tp);
6592
6593                 if (err) {
6594                         tg3_full_lock(tp, 0);
6595
6596                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6597                                 pci_disable_msi(tp->pdev);
6598                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6599                         }
6600                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6601                         tg3_free_rings(tp);
6602                         tg3_free_consistent(tp);
6603
6604                         tg3_full_unlock(tp);
6605
6606                         return err;
6607                 }
6608         }
6609
6610         tg3_full_lock(tp, 0);
6611
6612         add_timer(&tp->timer);
6613         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6614         tg3_enable_ints(tp);
6615
6616         tg3_full_unlock(tp);
6617
6618         netif_start_queue(dev);
6619
6620         return 0;
6621 }
6622
6623 #if 0
6624 /*static*/ void tg3_dump_state(struct tg3 *tp)
6625 {
6626         u32 val32, val32_2, val32_3, val32_4, val32_5;
6627         u16 val16;
6628         int i;
6629
6630         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6631         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6632         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6633                val16, val32);
6634
6635         /* MAC block */
6636         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6637                tr32(MAC_MODE), tr32(MAC_STATUS));
6638         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6639                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6640         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6641                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6642         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6643                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6644
6645         /* Send data initiator control block */
6646         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6647                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6648         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6649                tr32(SNDDATAI_STATSCTRL));
6650
6651         /* Send data completion control block */
6652         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6653
6654         /* Send BD ring selector block */
6655         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6656                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6657
6658         /* Send BD initiator control block */
6659         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6660                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6661
6662         /* Send BD completion control block */
6663         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6664
6665         /* Receive list placement control block */
6666         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6667                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6668         printk("       RCVLPC_STATSCTRL[%08x]\n",
6669                tr32(RCVLPC_STATSCTRL));
6670
6671         /* Receive data and receive BD initiator control block */
6672         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6673                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6674
6675         /* Receive data completion control block */
6676         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6677                tr32(RCVDCC_MODE));
6678
6679         /* Receive BD initiator control block */
6680         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6681                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6682
6683         /* Receive BD completion control block */
6684         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6685                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6686
6687         /* Receive list selector control block */
6688         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6689                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6690
6691         /* Mbuf cluster free block */
6692         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6693                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6694
6695         /* Host coalescing control block */
6696         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6697                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6698         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6699                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6700                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6701         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6702                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6703                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6704         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6705                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6706         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6707                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6708
6709         /* Memory arbiter control block */
6710         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6711                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6712
6713         /* Buffer manager control block */
6714         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6715                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6716         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6717                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6718         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6719                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6720                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6721                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6722
6723         /* Read DMA control block */
6724         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6725                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6726
6727         /* Write DMA control block */
6728         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6729                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6730
6731         /* DMA completion block */
6732         printk("DEBUG: DMAC_MODE[%08x]\n",
6733                tr32(DMAC_MODE));
6734
6735         /* GRC block */
6736         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6737                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6738         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6739                tr32(GRC_LOCAL_CTRL));
6740
6741         /* TG3_BDINFOs */
6742         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6743                tr32(RCVDBDI_JUMBO_BD + 0x0),
6744                tr32(RCVDBDI_JUMBO_BD + 0x4),
6745                tr32(RCVDBDI_JUMBO_BD + 0x8),
6746                tr32(RCVDBDI_JUMBO_BD + 0xc));
6747         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6748                tr32(RCVDBDI_STD_BD + 0x0),
6749                tr32(RCVDBDI_STD_BD + 0x4),
6750                tr32(RCVDBDI_STD_BD + 0x8),
6751                tr32(RCVDBDI_STD_BD + 0xc));
6752         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6753                tr32(RCVDBDI_MINI_BD + 0x0),
6754                tr32(RCVDBDI_MINI_BD + 0x4),
6755                tr32(RCVDBDI_MINI_BD + 0x8),
6756                tr32(RCVDBDI_MINI_BD + 0xc));
6757
6758         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6759         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6760         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6761         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6762         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6763                val32, val32_2, val32_3, val32_4);
6764
6765         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6766         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6767         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6768         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6769         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6770                val32, val32_2, val32_3, val32_4);
6771
6772         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6773         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6774         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6775         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6776         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6777         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6778                val32, val32_2, val32_3, val32_4, val32_5);
6779
6780         /* SW status block */
6781         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6782                tp->hw_status->status,
6783                tp->hw_status->status_tag,
6784                tp->hw_status->rx_jumbo_consumer,
6785                tp->hw_status->rx_consumer,
6786                tp->hw_status->rx_mini_consumer,
6787                tp->hw_status->idx[0].rx_producer,
6788                tp->hw_status->idx[0].tx_consumer);
6789
6790         /* SW statistics block */
6791         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6792                ((u32 *)tp->hw_stats)[0],
6793                ((u32 *)tp->hw_stats)[1],
6794                ((u32 *)tp->hw_stats)[2],
6795                ((u32 *)tp->hw_stats)[3]);
6796
6797         /* Mailboxes */
6798         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6799                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6800                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6801                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6802                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6803
6804         /* NIC side send descriptors. */
6805         for (i = 0; i < 6; i++) {
6806                 unsigned long txd;
6807
6808                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6809                         + (i * sizeof(struct tg3_tx_buffer_desc));
6810                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6811                        i,
6812                        readl(txd + 0x0), readl(txd + 0x4),
6813                        readl(txd + 0x8), readl(txd + 0xc));
6814         }
6815
6816         /* NIC side RX descriptors. */
6817         for (i = 0; i < 6; i++) {
6818                 unsigned long rxd;
6819
6820                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6821                         + (i * sizeof(struct tg3_rx_buffer_desc));
6822                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6823                        i,
6824                        readl(rxd + 0x0), readl(rxd + 0x4),
6825                        readl(rxd + 0x8), readl(rxd + 0xc));
6826                 rxd += (4 * sizeof(u32));
6827                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6828                        i,
6829                        readl(rxd + 0x0), readl(rxd + 0x4),
6830                        readl(rxd + 0x8), readl(rxd + 0xc));
6831         }
6832
6833         for (i = 0; i < 6; i++) {
6834                 unsigned long rxd;
6835
6836                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6837                         + (i * sizeof(struct tg3_rx_buffer_desc));
6838                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6839                        i,
6840                        readl(rxd + 0x0), readl(rxd + 0x4),
6841                        readl(rxd + 0x8), readl(rxd + 0xc));
6842                 rxd += (4 * sizeof(u32));
6843                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6844                        i,
6845                        readl(rxd + 0x0), readl(rxd + 0x4),
6846                        readl(rxd + 0x8), readl(rxd + 0xc));
6847         }
6848 }
6849 #endif
6850
6851 static struct net_device_stats *tg3_get_stats(struct net_device *);
6852 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6853
6854 static int tg3_close(struct net_device *dev)
6855 {
6856         struct tg3 *tp = netdev_priv(dev);
6857
6858         /* Calling flush_scheduled_work() may deadlock because
6859          * linkwatch_event() may be on the workqueue and it will try to
6860          * take the rtnl_lock, which we already hold.
6861          */
6862         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6863                 msleep(1);
6864
6865         netif_stop_queue(dev);
6866
6867         del_timer_sync(&tp->timer);
6868
6869         tg3_full_lock(tp, 1);
6870 #if 0
6871         tg3_dump_state(tp);
6872 #endif
6873
6874         tg3_disable_ints(tp);
6875
6876         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6877         tg3_free_rings(tp);
6878         tp->tg3_flags &=
6879                 ~(TG3_FLAG_INIT_COMPLETE |
6880                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6881
6882         tg3_full_unlock(tp);
6883
6884         free_irq(tp->pdev->irq, dev);
6885         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6886                 pci_disable_msi(tp->pdev);
6887                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6888         }
6889
6890         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6891                sizeof(tp->net_stats_prev));
6892         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6893                sizeof(tp->estats_prev));
6894
6895         tg3_free_consistent(tp);
6896
6897         tg3_set_power_state(tp, PCI_D3hot);
6898
6899         netif_carrier_off(tp->dev);
6900
6901         return 0;
6902 }
6903
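     /* On 32-bit hosts struct net_device_stats holds unsigned longs, so
      * only the low word of each 64-bit hardware counter can be returned.
      */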
6904 static inline unsigned long get_stat64(tg3_stat64_t *val)
6905 {
6906         unsigned long ret;
6907
6908 #if (BITS_PER_LONG == 32)
6909         ret = val->low;
6910 #else
6911         ret = ((u64)val->high << 32) | ((u64)val->low);
6912 #endif
6913         return ret;
6914 }
6915
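     /* On 5700/5701 copper devices the CRC error count lives in the PHY
      * rather than in the MAC statistics block.  The register sequence
      * below appears to enable an expanded register view via register
      * 0x1e and then read the error counter from register 0x14; the
      * result is accumulated into tp->phy_crc_errors.
      */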
6916 static unsigned long calc_crc_errors(struct tg3 *tp)
6917 {
6918         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6919
6920         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6921             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6922              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6923                 u32 val;
6924
6925                 spin_lock_bh(&tp->lock);
6926                 if (!tg3_readphy(tp, 0x1e, &val)) {
6927                         tg3_writephy(tp, 0x1e, val | 0x8000);
6928                         tg3_readphy(tp, 0x14, &val);
6929                 } else
6930                         val = 0;
6931                 spin_unlock_bh(&tp->lock);
6932
6933                 tp->phy_crc_errors += val;
6934
6935                 return tp->phy_crc_errors;
6936         }
6937
6938         return get_stat64(&hw_stats->rx_fcs_errors);
6939 }
6940
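     /* Each ethtool statistic is the value snapshotted at the last close
      * (tp->estats_prev, saved in tg3_close()) plus whatever is currently
      * in the hardware statistics block, so counts survive an ifdown/ifup
      * cycle.
      */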
6941 #define ESTAT_ADD(member) \
6942         estats->member =        old_estats->member + \
6943                                 get_stat64(&hw_stats->member)
6944
6945 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6946 {
6947         struct tg3_ethtool_stats *estats = &tp->estats;
6948         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6949         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6950
6951         if (!hw_stats)
6952                 return old_estats;
6953
6954         ESTAT_ADD(rx_octets);
6955         ESTAT_ADD(rx_fragments);
6956         ESTAT_ADD(rx_ucast_packets);
6957         ESTAT_ADD(rx_mcast_packets);
6958         ESTAT_ADD(rx_bcast_packets);
6959         ESTAT_ADD(rx_fcs_errors);
6960         ESTAT_ADD(rx_align_errors);
6961         ESTAT_ADD(rx_xon_pause_rcvd);
6962         ESTAT_ADD(rx_xoff_pause_rcvd);
6963         ESTAT_ADD(rx_mac_ctrl_rcvd);
6964         ESTAT_ADD(rx_xoff_entered);
6965         ESTAT_ADD(rx_frame_too_long_errors);
6966         ESTAT_ADD(rx_jabbers);
6967         ESTAT_ADD(rx_undersize_packets);
6968         ESTAT_ADD(rx_in_length_errors);
6969         ESTAT_ADD(rx_out_length_errors);
6970         ESTAT_ADD(rx_64_or_less_octet_packets);
6971         ESTAT_ADD(rx_65_to_127_octet_packets);
6972         ESTAT_ADD(rx_128_to_255_octet_packets);
6973         ESTAT_ADD(rx_256_to_511_octet_packets);
6974         ESTAT_ADD(rx_512_to_1023_octet_packets);
6975         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6976         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6977         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6978         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6979         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6980
6981         ESTAT_ADD(tx_octets);
6982         ESTAT_ADD(tx_collisions);
6983         ESTAT_ADD(tx_xon_sent);
6984         ESTAT_ADD(tx_xoff_sent);
6985         ESTAT_ADD(tx_flow_control);
6986         ESTAT_ADD(tx_mac_errors);
6987         ESTAT_ADD(tx_single_collisions);
6988         ESTAT_ADD(tx_mult_collisions);
6989         ESTAT_ADD(tx_deferred);
6990         ESTAT_ADD(tx_excessive_collisions);
6991         ESTAT_ADD(tx_late_collisions);
6992         ESTAT_ADD(tx_collide_2times);
6993         ESTAT_ADD(tx_collide_3times);
6994         ESTAT_ADD(tx_collide_4times);
6995         ESTAT_ADD(tx_collide_5times);
6996         ESTAT_ADD(tx_collide_6times);
6997         ESTAT_ADD(tx_collide_7times);
6998         ESTAT_ADD(tx_collide_8times);
6999         ESTAT_ADD(tx_collide_9times);
7000         ESTAT_ADD(tx_collide_10times);
7001         ESTAT_ADD(tx_collide_11times);
7002         ESTAT_ADD(tx_collide_12times);
7003         ESTAT_ADD(tx_collide_13times);
7004         ESTAT_ADD(tx_collide_14times);
7005         ESTAT_ADD(tx_collide_15times);
7006         ESTAT_ADD(tx_ucast_packets);
7007         ESTAT_ADD(tx_mcast_packets);
7008         ESTAT_ADD(tx_bcast_packets);
7009         ESTAT_ADD(tx_carrier_sense_errors);
7010         ESTAT_ADD(tx_discards);
7011         ESTAT_ADD(tx_errors);
7012
7013         ESTAT_ADD(dma_writeq_full);
7014         ESTAT_ADD(dma_write_prioq_full);
7015         ESTAT_ADD(rxbds_empty);
7016         ESTAT_ADD(rx_discards);
7017         ESTAT_ADD(rx_errors);
7018         ESTAT_ADD(rx_threshold_hit);
7019
7020         ESTAT_ADD(dma_readq_full);
7021         ESTAT_ADD(dma_read_prioq_full);
7022         ESTAT_ADD(tx_comp_queue_full);
7023
7024         ESTAT_ADD(ring_set_send_prod_index);
7025         ESTAT_ADD(ring_status_update);
7026         ESTAT_ADD(nic_irqs);
7027         ESTAT_ADD(nic_avoided_irqs);
7028         ESTAT_ADD(nic_tx_threshold_hit);
7029
7030         return estats;
7031 }
7032
7033 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7034 {
7035         struct tg3 *tp = netdev_priv(dev);
7036         struct net_device_stats *stats = &tp->net_stats;
7037         struct net_device_stats *old_stats = &tp->net_stats_prev;
7038         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7039
7040         if (!hw_stats)
7041                 return old_stats;
7042
7043         stats->rx_packets = old_stats->rx_packets +
7044                 get_stat64(&hw_stats->rx_ucast_packets) +
7045                 get_stat64(&hw_stats->rx_mcast_packets) +
7046                 get_stat64(&hw_stats->rx_bcast_packets);
7047                 
7048         stats->tx_packets = old_stats->tx_packets +
7049                 get_stat64(&hw_stats->tx_ucast_packets) +
7050                 get_stat64(&hw_stats->tx_mcast_packets) +
7051                 get_stat64(&hw_stats->tx_bcast_packets);
7052
7053         stats->rx_bytes = old_stats->rx_bytes +
7054                 get_stat64(&hw_stats->rx_octets);
7055         stats->tx_bytes = old_stats->tx_bytes +
7056                 get_stat64(&hw_stats->tx_octets);
7057
7058         stats->rx_errors = old_stats->rx_errors +
7059                 get_stat64(&hw_stats->rx_errors);
7060         stats->tx_errors = old_stats->tx_errors +
7061                 get_stat64(&hw_stats->tx_errors) +
7062                 get_stat64(&hw_stats->tx_mac_errors) +
7063                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7064                 get_stat64(&hw_stats->tx_discards);
7065
7066         stats->multicast = old_stats->multicast +
7067                 get_stat64(&hw_stats->rx_mcast_packets);
7068         stats->collisions = old_stats->collisions +
7069                 get_stat64(&hw_stats->tx_collisions);
7070
7071         stats->rx_length_errors = old_stats->rx_length_errors +
7072                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7073                 get_stat64(&hw_stats->rx_undersize_packets);
7074
7075         stats->rx_over_errors = old_stats->rx_over_errors +
7076                 get_stat64(&hw_stats->rxbds_empty);
7077         stats->rx_frame_errors = old_stats->rx_frame_errors +
7078                 get_stat64(&hw_stats->rx_align_errors);
7079         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7080                 get_stat64(&hw_stats->tx_discards);
7081         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7082                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7083
7084         stats->rx_crc_errors = old_stats->rx_crc_errors +
7085                 calc_crc_errors(tp);
7086
7087         stats->rx_missed_errors = old_stats->rx_missed_errors +
7088                 get_stat64(&hw_stats->rx_discards);
7089
7090         return stats;
7091 }
7092
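     /* Bit-serial CRC-32 over the destination MAC address: reflected
      * polynomial 0xEDB88320, all-ones seed, final inversion.  This is
      * the CRC the MAC is expected to compute when it hashes incoming
      * multicast addresses against the 128-bit filter programmed below.
      */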
7093 static inline u32 calc_crc(unsigned char *buf, int len)
7094 {
7095         u32 reg;
7096         u32 tmp;
7097         int j, k;
7098
7099         reg = 0xffffffff;
7100
7101         for (j = 0; j < len; j++) {
7102                 reg ^= buf[j];
7103
7104                 for (k = 0; k < 8; k++) {
7105                         tmp = reg & 0x01;
7106
7107                         reg >>= 1;
7108
7109                         if (tmp) {
7110                                 reg ^= 0xedb88320;
7111                         }
7112                 }
7113         }
7114
7115         return ~reg;
7116 }
7117
7118 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7119 {
7120         /* accept or reject all multicast frames */
7121         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7122         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7123         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7124         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7125 }
7126
7127 static void __tg3_set_rx_mode(struct net_device *dev)
7128 {
7129         struct tg3 *tp = netdev_priv(dev);
7130         u32 rx_mode;
7131
7132         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7133                                   RX_MODE_KEEP_VLAN_TAG);
7134
7135         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7136          * flag clear.
7137          */
7138 #if TG3_VLAN_TAG_USED
7139         if (!tp->vlgrp &&
7140             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7141                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7142 #else
7143         /* By definition, VLAN is always disabled in this
7144          * case.
7145          */
7146         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7147                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7148 #endif
7149
7150         if (dev->flags & IFF_PROMISC) {
7151                 /* Promiscuous mode. */
7152                 rx_mode |= RX_MODE_PROMISC;
7153         } else if (dev->flags & IFF_ALLMULTI) {
7154                 /* Accept all multicast. */
7155                 tg3_set_multi(tp, 1);
7156         } else if (dev->mc_count < 1) {
7157                 /* Reject all multicast. */
7158                 tg3_set_multi(tp, 0);
7159         } else {
7160                 /* Accept one or more multicast(s). */
7161                 struct dev_mc_list *mclist;
7162                 unsigned int i;
7163                 u32 mc_filter[4] = { 0, };
7164                 u32 regidx;
7165                 u32 bit;
7166                 u32 crc;
7167
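                     /* The low 7 bits of the inverted CRC select one of the
                      * 128 hash-filter bits: bits 6:5 pick which of the four
                      * MAC_HASH_REG registers, bits 4:0 the bit within it.
                      */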
7168                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7169                      i++, mclist = mclist->next) {
7170
7171                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7172                         bit = ~crc & 0x7f;
7173                         regidx = (bit & 0x60) >> 5;
7174                         bit &= 0x1f;
7175                         mc_filter[regidx] |= (1 << bit);
7176                 }
7177
7178                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7179                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7180                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7181                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7182         }
7183
7184         if (rx_mode != tp->rx_mode) {
7185                 tp->rx_mode = rx_mode;
7186                 tw32_f(MAC_RX_MODE, rx_mode);
7187                 udelay(10);
7188         }
7189 }
7190
7191 static void tg3_set_rx_mode(struct net_device *dev)
7192 {
7193         struct tg3 *tp = netdev_priv(dev);
7194
7195         tg3_full_lock(tp, 0);
7196         __tg3_set_rx_mode(dev);
7197         tg3_full_unlock(tp);
7198 }
7199
7200 #define TG3_REGDUMP_LEN         (32 * 1024)
7201
7202 static int tg3_get_regs_len(struct net_device *dev)
7203 {
7204         return TG3_REGDUMP_LEN;
7205 }
7206
7207 static void tg3_get_regs(struct net_device *dev,
7208                 struct ethtool_regs *regs, void *_p)
7209 {
7210         u32 *p = _p;
7211         struct tg3 *tp = netdev_priv(dev);
7212         u8 *orig_p = _p;
7213         int i;
7214
7215         regs->version = 0;
7216
7217         memset(p, 0, TG3_REGDUMP_LEN);
7218
7219         if (tp->link_config.phy_is_low_power)
7220                 return;
7221
7222         tg3_full_lock(tp, 0);
7223
7224 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7225 #define GET_REG32_LOOP(base,len)                \
7226 do {    p = (u32 *)(orig_p + (base));           \
7227         for (i = 0; i < len; i += 4)            \
7228                 __GET_REG32((base) + i);        \
7229 } while (0)
7230 #define GET_REG32_1(reg)                        \
7231 do {    p = (u32 *)(orig_p + (reg));            \
7232         __GET_REG32((reg));                     \
7233 } while (0)
7234
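             /* The dump buffer mirrors the register address map: before
              * each group, p is repositioned to orig_p + base so every
              * register value lands at its own offset within the 32KB
              * dump.  Gaps between groups stay zero from the memset()
              * above.
              */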
7235         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7236         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7237         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7238         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7239         GET_REG32_1(SNDDATAC_MODE);
7240         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7241         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7242         GET_REG32_1(SNDBDC_MODE);
7243         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7244         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7245         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7246         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7247         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7248         GET_REG32_1(RCVDCC_MODE);
7249         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7250         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7251         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7252         GET_REG32_1(MBFREE_MODE);
7253         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7254         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7255         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7256         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7257         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7258         GET_REG32_1(RX_CPU_MODE);
7259         GET_REG32_1(RX_CPU_STATE);
7260         GET_REG32_1(RX_CPU_PGMCTR);
7261         GET_REG32_1(RX_CPU_HWBKPT);
7262         GET_REG32_1(TX_CPU_MODE);
7263         GET_REG32_1(TX_CPU_STATE);
7264         GET_REG32_1(TX_CPU_PGMCTR);
7265         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7266         GET_REG32_LOOP(FTQ_RESET, 0x120);
7267         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7268         GET_REG32_1(DMAC_MODE);
7269         GET_REG32_LOOP(GRC_MODE, 0x4c);
7270         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7271                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7272
7273 #undef __GET_REG32
7274 #undef GET_REG32_LOOP
7275 #undef GET_REG32_1
7276
7277         tg3_full_unlock(tp);
7278 }
7279
7280 static int tg3_get_eeprom_len(struct net_device *dev)
7281 {
7282         struct tg3 *tp = netdev_priv(dev);
7283
7284         return tp->nvram_size;
7285 }
7286
7287 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7288
7289 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7290 {
7291         struct tg3 *tp = netdev_priv(dev);
7292         int ret;
7293         u8  *pd;
7294         u32 i, offset, len, val, b_offset, b_count;
7295
7296         if (tp->link_config.phy_is_low_power)
7297                 return -EAGAIN;
7298
7299         offset = eeprom->offset;
7300         len = eeprom->len;
7301         eeprom->len = 0;
7302
7303         eeprom->magic = TG3_EEPROM_MAGIC;
7304
7305         if (offset & 3) {
7306                 /* adjustments to start on required 4 byte boundary */
7307                 b_offset = offset & 3;
7308                 b_count = 4 - b_offset;
7309                 if (b_count > len) {
7310                         /* i.e. offset=1 len=2 */
7311                         b_count = len;
7312                 }
7313                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7314                 if (ret)
7315                         return ret;
7316                 val = cpu_to_le32(val);
7317                 memcpy(data, ((char*)&val) + b_offset, b_count);
7318                 len -= b_count;
7319                 offset += b_count;
7320                 eeprom->len += b_count;
7321         }
7322
7323         /* read bytes up to the last 4-byte boundary */
7324         pd = &data[eeprom->len];
7325         for (i = 0; i < (len - (len & 3)); i += 4) {
7326                 ret = tg3_nvram_read(tp, offset + i, &val);
7327                 if (ret) {
7328                         eeprom->len += i;
7329                         return ret;
7330                 }
7331                 val = cpu_to_le32(val);
7332                 memcpy(pd + i, &val, 4);
7333         }
7334         eeprom->len += i;
7335
7336         if (len & 3) {
7337                 /* read last bytes not ending on 4 byte boundary */
7338                 pd = &data[eeprom->len];
7339                 b_count = len & 3;
7340                 b_offset = offset + len - b_count;
7341                 ret = tg3_nvram_read(tp, b_offset, &val);
7342                 if (ret)
7343                         return ret;
7344                 val = cpu_to_le32(val);
7345                 memcpy(pd, ((char*)&val), b_count);
7346                 eeprom->len += b_count;
7347         }
7348         return 0;
7349 }
7350
7351 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7352
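     /* NVRAM writes must be whole 32-bit words, so unaligned requests are
      * handled read-modify-write.  For example, offset=5 len=6 becomes an
      * 8-byte write at offset 4: the words at offsets 4 and 8 are read
      * into a bounce buffer, the caller's 6 bytes are copied in starting
      * at byte 1 of that buffer, and the aligned block is written back.
      */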
7353 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7354 {
7355         struct tg3 *tp = netdev_priv(dev);
7356         int ret;
7357         u32 offset, len, b_offset, odd_len, start, end;
7358         u8 *buf;
7359
7360         if (tp->link_config.phy_is_low_power)
7361                 return -EAGAIN;
7362
7363         if (eeprom->magic != TG3_EEPROM_MAGIC)
7364                 return -EINVAL;
7365
7366         offset = eeprom->offset;
7367         len = eeprom->len;
7368
7369         if ((b_offset = (offset & 3))) {
7370                 /* adjustments to start on required 4 byte boundary */
7371                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7372                 if (ret)
7373                         return ret;
7374                 start = cpu_to_le32(start);
7375                 len += b_offset;
7376                 offset &= ~3;
7377                 if (len < 4)
7378                         len = 4;
7379         }
7380
7381         odd_len = 0;
7382         if (len & 3) {
7383                 /* adjustments to end on required 4 byte boundary */
7384                 odd_len = 1;
7385                 len = (len + 3) & ~3;
7386                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7387                 if (ret)
7388                         return ret;
7389                 end = cpu_to_le32(end);
7390         }
7391
7392         buf = data;
7393         if (b_offset || odd_len) {
7394                 buf = kmalloc(len, GFP_KERNEL);
7395                 if (!buf)
7396                         return -ENOMEM;
7397                 if (b_offset)
7398                         memcpy(buf, &start, 4);
7399                 if (odd_len)
7400                         memcpy(buf+len-4, &end, 4);
7401                 memcpy(buf + b_offset, data, eeprom->len);
7402         }
7403
7404         ret = tg3_nvram_write_block(tp, offset, len, buf);
7405
7406         if (buf != data)
7407                 kfree(buf);
7408
7409         return ret;
7410 }
7411
7412 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7413 {
7414         struct tg3 *tp = netdev_priv(dev);
7415   
7416         cmd->supported = (SUPPORTED_Autoneg);
7417
7418         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7419                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7420                                    SUPPORTED_1000baseT_Full);
7421
7422         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7423                 cmd->supported |= (SUPPORTED_100baseT_Half |
7424                                   SUPPORTED_100baseT_Full |
7425                                   SUPPORTED_10baseT_Half |
7426                                   SUPPORTED_10baseT_Full |
7427                                   SUPPORTED_MII);
7428         else
7429                 cmd->supported |= SUPPORTED_FIBRE;
7430   
7431         cmd->advertising = tp->link_config.advertising;
7432         if (netif_running(dev)) {
7433                 cmd->speed = tp->link_config.active_speed;
7434                 cmd->duplex = tp->link_config.active_duplex;
7435         }
7436         cmd->port = 0;
7437         cmd->phy_address = PHY_ADDR;
7438         cmd->transceiver = 0;
7439         cmd->autoneg = tp->link_config.autoneg;
7440         cmd->maxtxpkt = 0;
7441         cmd->maxrxpkt = 0;
7442         return 0;
7443 }
7444   
7445 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7446 {
7447         struct tg3 *tp = netdev_priv(dev);
7448   
7449         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7450                 /* These are the only valid advertisement bits allowed.  */
7451                 if (cmd->autoneg == AUTONEG_ENABLE &&
7452                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7453                                           ADVERTISED_1000baseT_Full |
7454                                           ADVERTISED_Autoneg |
7455                                           ADVERTISED_FIBRE)))
7456                         return -EINVAL;
7457                 /* Fiber can only do SPEED_1000.  */
7458                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7459                          (cmd->speed != SPEED_1000))
7460                         return -EINVAL;
7461         /* Copper cannot force SPEED_1000.  */
7462         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7463                    (cmd->speed == SPEED_1000))
7464                 return -EINVAL;
7465         else if ((cmd->speed == SPEED_1000) &&
7466                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7467                 return -EINVAL;
7468
7469         tg3_full_lock(tp, 0);
7470
7471         tp->link_config.autoneg = cmd->autoneg;
7472         if (cmd->autoneg == AUTONEG_ENABLE) {
7473                 tp->link_config.advertising = cmd->advertising;
7474                 tp->link_config.speed = SPEED_INVALID;
7475                 tp->link_config.duplex = DUPLEX_INVALID;
7476         } else {
7477                 tp->link_config.advertising = 0;
7478                 tp->link_config.speed = cmd->speed;
7479                 tp->link_config.duplex = cmd->duplex;
7480         }
7481   
7482         if (netif_running(dev))
7483                 tg3_setup_phy(tp, 1);
7484
7485         tg3_full_unlock(tp);
7486   
7487         return 0;
7488 }
7489   
7490 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7491 {
7492         struct tg3 *tp = netdev_priv(dev);
7493   
7494         strcpy(info->driver, DRV_MODULE_NAME);
7495         strcpy(info->version, DRV_MODULE_VERSION);
7496         strcpy(info->bus_info, pci_name(tp->pdev));
7497 }
7498   
7499 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7500 {
7501         struct tg3 *tp = netdev_priv(dev);
7502   
7503         wol->supported = WAKE_MAGIC;
7504         wol->wolopts = 0;
7505         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7506                 wol->wolopts = WAKE_MAGIC;
7507         memset(&wol->sopass, 0, sizeof(wol->sopass));
7508 }
7509   
7510 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7511 {
7512         struct tg3 *tp = netdev_priv(dev);
7513   
7514         if (wol->wolopts & ~WAKE_MAGIC)
7515                 return -EINVAL;
7516         if ((wol->wolopts & WAKE_MAGIC) &&
7517             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7518             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7519                 return -EINVAL;
7520   
7521         spin_lock_bh(&tp->lock);
7522         if (wol->wolopts & WAKE_MAGIC)
7523                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7524         else
7525                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7526         spin_unlock_bh(&tp->lock);
7527   
7528         return 0;
7529 }
7530   
7531 static u32 tg3_get_msglevel(struct net_device *dev)
7532 {
7533         struct tg3 *tp = netdev_priv(dev);
7534         return tp->msg_enable;
7535 }
7536   
7537 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7538 {
7539         struct tg3 *tp = netdev_priv(dev);
7540         tp->msg_enable = value;
7541 }
7542   
7543 #if TG3_TSO_SUPPORT != 0
7544 static int tg3_set_tso(struct net_device *dev, u32 value)
7545 {
7546         struct tg3 *tp = netdev_priv(dev);
7547
7548         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7549                 if (value)
7550                         return -EINVAL;
7551                 return 0;
7552         }
7553         return ethtool_op_set_tso(dev, value);
7554 }
7555 #endif
7556   
7557 static int tg3_nway_reset(struct net_device *dev)
7558 {
7559         struct tg3 *tp = netdev_priv(dev);
7560         u32 bmcr;
7561         int r;
7562   
7563         if (!netif_running(dev))
7564                 return -EAGAIN;
7565
7566         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7567                 return -EINVAL;
7568
7569         spin_lock_bh(&tp->lock);
7570         r = -EINVAL;
7571         tg3_readphy(tp, MII_BMCR, &bmcr);
7572         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7573             ((bmcr & BMCR_ANENABLE) ||
7574              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7575                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7576                                            BMCR_ANENABLE);
7577                 r = 0;
7578         }
7579         spin_unlock_bh(&tp->lock);
7580   
7581         return r;
7582 }
7583   
7584 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7585 {
7586         struct tg3 *tp = netdev_priv(dev);
7587   
7588         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7589         ering->rx_mini_max_pending = 0;
7590         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7591
7592         ering->rx_pending = tp->rx_pending;
7593         ering->rx_mini_pending = 0;
7594         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7595         ering->tx_pending = tp->tx_pending;
7596 }
7597   
7598 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7599 {
7600         struct tg3 *tp = netdev_priv(dev);
7601         int irq_sync = 0;
7602   
7603         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7604             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7605             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7606                 return -EINVAL;
7607   
7608         if (netif_running(dev)) {
7609                 tg3_netif_stop(tp);
7610                 irq_sync = 1;
7611         }
7612
7613         tg3_full_lock(tp, irq_sync);
7614   
7615         tp->rx_pending = ering->rx_pending;
7616
7617         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7618             tp->rx_pending > 63)
7619                 tp->rx_pending = 63;
7620         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7621         tp->tx_pending = ering->tx_pending;
7622
7623         if (netif_running(dev)) {
7624                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7625                 tg3_init_hw(tp);
7626                 tg3_netif_start(tp);
7627         }
7628
7629         tg3_full_unlock(tp);
7630   
7631         return 0;
7632 }
7633   
7634 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7635 {
7636         struct tg3 *tp = netdev_priv(dev);
7637   
7638         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7639         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7640         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7641 }
7642   
7643 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7644 {
7645         struct tg3 *tp = netdev_priv(dev);
7646         int irq_sync = 0;
7647   
7648         if (netif_running(dev)) {
7649                 tg3_netif_stop(tp);
7650                 irq_sync = 1;
7651         }
7652
7653         tg3_full_lock(tp, irq_sync);
7654
7655         if (epause->autoneg)
7656                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7657         else
7658                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7659         if (epause->rx_pause)
7660                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7661         else
7662                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7663         if (epause->tx_pause)
7664                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7665         else
7666                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7667
7668         if (netif_running(dev)) {
7669                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7670                 tg3_init_hw(tp);
7671                 tg3_netif_start(tp);
7672         }
7673
7674         tg3_full_unlock(tp);
7675   
7676         return 0;
7677 }
7678   
7679 static u32 tg3_get_rx_csum(struct net_device *dev)
7680 {
7681         struct tg3 *tp = netdev_priv(dev);
7682         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7683 }
7684   
7685 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7686 {
7687         struct tg3 *tp = netdev_priv(dev);
7688   
7689         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7690                 if (data != 0)
7691                         return -EINVAL;
7692                 return 0;
7693         }
7694   
7695         spin_lock_bh(&tp->lock);
7696         if (data)
7697                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7698         else
7699                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7700         spin_unlock_bh(&tp->lock);
7701   
7702         return 0;
7703 }
7704   
7705 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7706 {
7707         struct tg3 *tp = netdev_priv(dev);
7708   
7709         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7710                 if (data != 0)
7711                         return -EINVAL;
7712                 return 0;
7713         }
7714   
7715         if (data)
7716                 dev->features |= NETIF_F_IP_CSUM;
7717         else
7718                 dev->features &= ~NETIF_F_IP_CSUM;
7719
7720         return 0;
7721 }
7722
7723 static int tg3_get_stats_count (struct net_device *dev)
7724 {
7725         return TG3_NUM_STATS;
7726 }
7727
7728 static int tg3_get_test_count (struct net_device *dev)
7729 {
7730         return TG3_NUM_TEST;
7731 }
7732
7733 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7734 {
7735         switch (stringset) {
7736         case ETH_SS_STATS:
7737                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7738                 break;
7739         case ETH_SS_TEST:
7740                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7741                 break;
7742         default:
7743                 WARN_ON(1);     /* we need a WARN() */
7744                 break;
7745         }
7746 }
7747
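     /* LED identify for ethtool (phys_id): blink the port LEDs so the
      * adapter can be located.  Every 500 ms the LED override bits toggle
      * between "all speed LEDs on + traffic blink" and "all off", for
      * roughly 'data' seconds (default 2), after which the saved led_ctrl
      * value is restored.
      */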
7748 static int tg3_phys_id(struct net_device *dev, u32 data)
7749 {
7750         struct tg3 *tp = netdev_priv(dev);
7751         int i;
7752
7753         if (!netif_running(tp->dev))
7754                 return -EAGAIN;
7755
7756         if (data == 0)
7757                 data = 2;
7758
7759         for (i = 0; i < (data * 2); i++) {
7760                 if ((i % 2) == 0)
7761                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7762                                            LED_CTRL_1000MBPS_ON |
7763                                            LED_CTRL_100MBPS_ON |
7764                                            LED_CTRL_10MBPS_ON |
7765                                            LED_CTRL_TRAFFIC_OVERRIDE |
7766                                            LED_CTRL_TRAFFIC_BLINK |
7767                                            LED_CTRL_TRAFFIC_LED);
7768         
7769                 else
7770                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7771                                            LED_CTRL_TRAFFIC_OVERRIDE);
7772
7773                 if (msleep_interruptible(500))
7774                         break;
7775         }
7776         tw32(MAC_LED_CTRL, tp->led_ctrl);
7777         return 0;
7778 }
7779
7780 static void tg3_get_ethtool_stats (struct net_device *dev,
7781                                    struct ethtool_stats *estats, u64 *tmp_stats)
7782 {
7783         struct tg3 *tp = netdev_priv(dev);
7784         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7785 }
7786
7787 #define NVRAM_TEST_SIZE 0x100
7788
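     /* NVRAM self-test: read the first NVRAM_TEST_SIZE bytes, check the
      * EEPROM magic in word 0, then verify the bootstrap checksum stored
      * at offset 0x10 and the manufacturing-block checksum stored at
      * offset 0xfc.  Returns 0 on success, -EIO on a mismatch, or the
      * NVRAM read error code.
      */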
7789 static int tg3_test_nvram(struct tg3 *tp)
7790 {
7791         u32 *buf, csum;
7792         int i, j, err = 0;
7793
7794         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7795         if (buf == NULL)
7796                 return -ENOMEM;
7797
7798         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7799                 u32 val;
7800
7801                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7802                         break;
7803                 buf[j] = cpu_to_le32(val);
7804         }
7805         if (i < NVRAM_TEST_SIZE)
7806                 goto out;
7807
7808         err = -EIO;
7809         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7810                 goto out;
7811
7812         /* Bootstrap checksum at offset 0x10 */
7813         csum = calc_crc((unsigned char *) buf, 0x10);
7814         if (csum != cpu_to_le32(buf[0x10/4]))
7815                 goto out;
7816
7817         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7818         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7819         if (csum != cpu_to_le32(buf[0xfc/4]))
7820                 goto out;
7821
7822         err = 0;
7823
7824 out:
7825         kfree(buf);
7826         return err;
7827 }
7828
7829 #define TG3_SERDES_TIMEOUT_SEC  2
7830 #define TG3_COPPER_TIMEOUT_SEC  6
7831
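     /* Link self-test: poll netif_carrier_ok() once per second, for up to
      * 2 seconds on SerDes devices or 6 seconds on copper, and report -EIO
      * if the link never comes up (-ENODEV if the interface is down).
      */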
7832 static int tg3_test_link(struct tg3 *tp)
7833 {
7834         int i, max;
7835
7836         if (!netif_running(tp->dev))
7837                 return -ENODEV;
7838
7839         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7840                 max = TG3_SERDES_TIMEOUT_SEC;
7841         else
7842                 max = TG3_COPPER_TIMEOUT_SEC;
7843
7844         for (i = 0; i < max; i++) {
7845                 if (netif_carrier_ok(tp->dev))
7846                         return 0;
7847
7848                 if (msleep_interruptible(1000))
7849                         break;
7850         }
7851
7852         return -EIO;
7853 }
7854
7855 /* Only test the commonly used registers */
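     /* Each table entry below pairs a register offset with
      * chip-applicability flags (5705-class, not-5705, not-5788) and masks
      * of its read-only and read/write bits.  The loop saves each
      * register, writes all zeroes and then all ones through the masks,
      * and checks that the read-only bits never change while the writable
      * bits follow the written value, restoring the original contents
      * afterwards.
      */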
7856 static int tg3_test_registers(struct tg3 *tp)
7857 {
7858         int i, is_5705;
7859         u32 offset, read_mask, write_mask, val, save_val, read_val;
7860         static struct {
7861                 u16 offset;
7862                 u16 flags;
7863 #define TG3_FL_5705     0x1
7864 #define TG3_FL_NOT_5705 0x2
7865 #define TG3_FL_NOT_5788 0x4
7866                 u32 read_mask;
7867                 u32 write_mask;
7868         } reg_tbl[] = {
7869                 /* MAC Control Registers */
7870                 { MAC_MODE, TG3_FL_NOT_5705,
7871                         0x00000000, 0x00ef6f8c },
7872                 { MAC_MODE, TG3_FL_5705,
7873                         0x00000000, 0x01ef6b8c },
7874                 { MAC_STATUS, TG3_FL_NOT_5705,
7875                         0x03800107, 0x00000000 },
7876                 { MAC_STATUS, TG3_FL_5705,
7877                         0x03800100, 0x00000000 },
7878                 { MAC_ADDR_0_HIGH, 0x0000,
7879                         0x00000000, 0x0000ffff },
7880                 { MAC_ADDR_0_LOW, 0x0000,
7881                         0x00000000, 0xffffffff },
7882                 { MAC_RX_MTU_SIZE, 0x0000,
7883                         0x00000000, 0x0000ffff },
7884                 { MAC_TX_MODE, 0x0000,
7885                         0x00000000, 0x00000070 },
7886                 { MAC_TX_LENGTHS, 0x0000,
7887                         0x00000000, 0x00003fff },
7888                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7889                         0x00000000, 0x000007fc },
7890                 { MAC_RX_MODE, TG3_FL_5705,
7891                         0x00000000, 0x000007dc },
7892                 { MAC_HASH_REG_0, 0x0000,
7893                         0x00000000, 0xffffffff },
7894                 { MAC_HASH_REG_1, 0x0000,
7895                         0x00000000, 0xffffffff },
7896                 { MAC_HASH_REG_2, 0x0000,
7897                         0x00000000, 0xffffffff },
7898                 { MAC_HASH_REG_3, 0x0000,
7899                         0x00000000, 0xffffffff },
7900
7901                 /* Receive Data and Receive BD Initiator Control Registers. */
7902                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7903                         0x00000000, 0xffffffff },
7904                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7905                         0x00000000, 0xffffffff },
7906                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7907                         0x00000000, 0x00000003 },
7908                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7909                         0x00000000, 0xffffffff },
7910                 { RCVDBDI_STD_BD+0, 0x0000,
7911                         0x00000000, 0xffffffff },
7912                 { RCVDBDI_STD_BD+4, 0x0000,
7913                         0x00000000, 0xffffffff },
7914                 { RCVDBDI_STD_BD+8, 0x0000,
7915                         0x00000000, 0xffff0002 },
7916                 { RCVDBDI_STD_BD+0xc, 0x0000,
7917                         0x00000000, 0xffffffff },
7918         
7919                 /* Receive BD Initiator Control Registers. */
7920                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7921                         0x00000000, 0xffffffff },
7922                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7923                         0x00000000, 0x000003ff },
7924                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7925                         0x00000000, 0xffffffff },
7926         
7927                 /* Host Coalescing Control Registers. */
7928                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7929                         0x00000000, 0x00000004 },
7930                 { HOSTCC_MODE, TG3_FL_5705,
7931                         0x00000000, 0x000000f6 },
7932                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7933                         0x00000000, 0xffffffff },
7934                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7935                         0x00000000, 0x000003ff },
7936                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7937                         0x00000000, 0xffffffff },
7938                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7939                         0x00000000, 0x000003ff },
7940                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7941                         0x00000000, 0xffffffff },
7942                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7943                         0x00000000, 0x000000ff },
7944                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7945                         0x00000000, 0xffffffff },
7946                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7947                         0x00000000, 0x000000ff },
7948                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7949                         0x00000000, 0xffffffff },
7950                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7951                         0x00000000, 0xffffffff },
7952                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7953                         0x00000000, 0xffffffff },
7954                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7955                         0x00000000, 0x000000ff },
7956                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7957                         0x00000000, 0xffffffff },
7958                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7959                         0x00000000, 0x000000ff },
7960                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7961                         0x00000000, 0xffffffff },
7962                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7963                         0x00000000, 0xffffffff },
7964                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7965                         0x00000000, 0xffffffff },
7966                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7967                         0x00000000, 0xffffffff },
7968                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7969                         0x00000000, 0xffffffff },
7970                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7971                         0xffffffff, 0x00000000 },
7972                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7973                         0xffffffff, 0x00000000 },
7974
7975                 /* Buffer Manager Control Registers. */
7976                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7977                         0x00000000, 0x007fff80 },
7978                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7979                         0x00000000, 0x007fffff },
7980                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7981                         0x00000000, 0x0000003f },
7982                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7983                         0x00000000, 0x000001ff },
7984                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7985                         0x00000000, 0x000001ff },
7986                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7987                         0xffffffff, 0x00000000 },
7988                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7989                         0xffffffff, 0x00000000 },
7990         
7991                 /* Mailbox Registers */
7992                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7993                         0x00000000, 0x000001ff },
7994                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7995                         0x00000000, 0x000001ff },
7996                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7997                         0x00000000, 0x000007ff },
7998                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7999                         0x00000000, 0x000001ff },
8000
8001                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8002         };
8003
8004         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8005                 is_5705 = 1;
8006         else
8007                 is_5705 = 0;
8008
8009         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8010                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8011                         continue;
8012
8013                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8014                         continue;
8015
8016                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8017                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8018                         continue;
8019
8020                 offset = (u32) reg_tbl[i].offset;
8021                 read_mask = reg_tbl[i].read_mask;
8022                 write_mask = reg_tbl[i].write_mask;
8023
8024                 /* Save the original register content */
8025                 save_val = tr32(offset);
8026
8027                 /* Determine the read-only value. */
8028                 read_val = save_val & read_mask;
8029
8030                 /* Write zero to the register, then make sure the read-only bits
8031                  * are not changed and the read/write bits are all zeros.
8032                  */
8033                 tw32(offset, 0);
8034
8035                 val = tr32(offset);
8036
8037                 /* Test the read-only and read/write bits. */
8038                 if (((val & read_mask) != read_val) || (val & write_mask))
8039                         goto out;
8040
8041                 /* Write ones to all the bits defined by RdMask and WrMask, then
8042                  * make sure the read-only bits are not changed and the
8043                  * read/write bits are all ones.
8044                  */
8045                 tw32(offset, read_mask | write_mask);
8046
8047                 val = tr32(offset);
8048
8049                 /* Test the read-only bits. */
8050                 if ((val & read_mask) != read_val)
8051                         goto out;
8052
8053                 /* Test the read/write bits. */
8054                 if ((val & write_mask) != write_mask)
8055                         goto out;
8056
8057                 tw32(offset, save_val);
8058         }
8059
8060         return 0;
8061
8062 out:
8063         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8064         tw32(offset, save_val);
8065         return -EIO;
8066 }
8067
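     /* Write three test patterns (all zeroes, all ones, 0xaa55a55a) to
      * every 32-bit word in the given internal-memory window and read each
      * one back, returning -EIO on the first mismatch.
      */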
8068 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8069 {
8070         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8071         int i;
8072         u32 j;
8073
8074         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8075                 for (j = 0; j < len; j += 4) {
8076                         u32 val;
8077
8078                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8079                         tg3_read_mem(tp, offset + j, &val);
8080                         if (val != test_pattern[i])
8081                                 return -EIO;
8082                 }
8083         }
8084         return 0;
8085 }
8086
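     /* Memory self-test: walk a per-chip table of internal SRAM regions
      * (a set of small windows on 5705-class chips, two large regions on
      * the older 570x parts) and run tg3_do_mem_test() on each.
      */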
8087 static int tg3_test_memory(struct tg3 *tp)
8088 {
8089         static struct mem_entry {
8090                 u32 offset;
8091                 u32 len;
8092         } mem_tbl_570x[] = {
8093                 { 0x00000000, 0x00b50},
8094                 { 0x00002000, 0x1c000},
8095                 { 0xffffffff, 0x00000}
8096         }, mem_tbl_5705[] = {
8097                 { 0x00000100, 0x0000c},
8098                 { 0x00000200, 0x00008},
8099                 { 0x00004000, 0x00800},
8100                 { 0x00006000, 0x01000},
8101                 { 0x00008000, 0x02000},
8102                 { 0x00010000, 0x0e000},
8103                 { 0xffffffff, 0x00000}
8104         };
8105         struct mem_entry *mem_tbl;
8106         int err = 0;
8107         int i;
8108
8109         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8110                 mem_tbl = mem_tbl_5705;
8111         else
8112                 mem_tbl = mem_tbl_570x;
8113
8114         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8115                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8116                     mem_tbl[i].len)) != 0)
8117                         break;
8118         }
8119         
8120         return err;
8121 }
8122
8123 #define TG3_MAC_LOOPBACK        0
8124 #define TG3_PHY_LOOPBACK        1
8125
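     /* Loopback self-test worker.  Put the MAC (internal loopback) or the
      * PHY (BMCR loopback at 1000/full) into loopback mode, transmit a
      * single 1514-byte frame addressed to our own MAC with an
      * incrementing byte pattern, then poll the status block until the TX
      * consumer and RX producer indices advance and verify the received
      * descriptor and payload.  MAC loopback is skipped on 5780 because of
      * a hardware erratum.
      */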
8126 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8127 {
8128         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8129         u32 desc_idx;
8130         struct sk_buff *skb, *rx_skb;
8131         u8 *tx_data;
8132         dma_addr_t map;
8133         int num_pkts, tx_len, rx_len, i, err;
8134         struct tg3_rx_buffer_desc *desc;
8135
8136         if (loopback_mode == TG3_MAC_LOOPBACK) {
8137                 /* HW errata - mac loopback fails in some cases on 5780.
8138                  * Normal traffic and PHY loopback are not affected by
8139                  * errata.
8140                  */
8141                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8142                         return 0;
8143
8144                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8145                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8146                            MAC_MODE_PORT_MODE_GMII;
8147                 tw32(MAC_MODE, mac_mode);
8148         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8149                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8150                                            BMCR_SPEED1000);
8151                 udelay(40);
8152                 /* reset to prevent losing 1st rx packet intermittently */
8153                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8154                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8155                         udelay(10);
8156                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8157                 }
8158                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8159                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8160                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8161                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8162                 tw32(MAC_MODE, mac_mode);
8163         }
8164         else
8165                 return -EINVAL;
8166
8167         err = -EIO;
8168
8169         tx_len = 1514;
8170         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8171         tx_data = skb_put(skb, tx_len);
8172         memcpy(tx_data, tp->dev->dev_addr, 6);
8173         memset(tx_data + 6, 0x0, 8);
8174
8175         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8176
8177         for (i = 14; i < tx_len; i++)
8178                 tx_data[i] = (u8) (i & 0xff);
8179
8180         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8181
8182         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8183              HOSTCC_MODE_NOW);
8184
8185         udelay(10);
8186
8187         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8188
8189         num_pkts = 0;
8190
8191         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8192
8193         tp->tx_prod++;
8194         num_pkts++;
8195
8196         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8197                      tp->tx_prod);
8198         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8199
8200         udelay(10);
8201
8202         for (i = 0; i < 10; i++) {
8203                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8204                        HOSTCC_MODE_NOW);
8205
8206                 udelay(10);
8207
8208                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8209                 rx_idx = tp->hw_status->idx[0].rx_producer;
8210                 if ((tx_idx == tp->tx_prod) &&
8211                     (rx_idx == (rx_start_idx + num_pkts)))
8212                         break;
8213         }
8214
8215         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8216         dev_kfree_skb(skb);
8217
8218         if (tx_idx != tp->tx_prod)
8219                 goto out;
8220
8221         if (rx_idx != rx_start_idx + num_pkts)
8222                 goto out;
8223
8224         desc = &tp->rx_rcb[rx_start_idx];
8225         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8226         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8227         if (opaque_key != RXD_OPAQUE_RING_STD)
8228                 goto out;
8229
8230         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8231             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8232                 goto out;
8233
8234         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8235         if (rx_len != tx_len)
8236                 goto out;
8237
8238         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8239
8240         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8241         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8242
8243         for (i = 14; i < tx_len; i++) {
8244                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8245                         goto out;
8246         }
8247         err = 0;
8248         
8249         /* tg3_free_rings will unmap and free the rx_skb */
8250 out:
8251         return err;
8252 }
8253
8254 #define TG3_MAC_LOOPBACK_FAILED         1
8255 #define TG3_PHY_LOOPBACK_FAILED         2
8256 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8257                                          TG3_PHY_LOOPBACK_FAILED)
8258
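     /* Run both loopback modes and return a bitmask of
      * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED.  PHY loopback is
      * skipped on PHY SerDes devices; if the interface is not running,
      * both tests are reported as failed.
      */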
8259 static int tg3_test_loopback(struct tg3 *tp)
8260 {
8261         int err = 0;
8262
8263         if (!netif_running(tp->dev))
8264                 return TG3_LOOPBACK_FAILED;
8265
8266         tg3_reset_hw(tp);
8267
8268         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8269                 err |= TG3_MAC_LOOPBACK_FAILED;
8270         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8271                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8272                         err |= TG3_PHY_LOOPBACK_FAILED;
8273         }
8274
8275         return err;
8276 }
8277
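     /* ethtool self-test entry point.  data[0]..data[5] report the NVRAM,
      * link, register, memory, loopback and interrupt tests respectively.
      * The offline tests halt the chip (holding the NVRAM lock while the
      * RX/TX CPUs are stopped), and the hardware is re-initialized and
      * restarted afterwards if the interface was running.  A PHY in
      * low-power state is temporarily brought back to D0 for the tests.
      */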
8278 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8279                           u64 *data)
8280 {
8281         struct tg3 *tp = netdev_priv(dev);
8282
8283         if (tp->link_config.phy_is_low_power)
8284                 tg3_set_power_state(tp, PCI_D0);
8285
8286         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8287
8288         if (tg3_test_nvram(tp) != 0) {
8289                 etest->flags |= ETH_TEST_FL_FAILED;
8290                 data[0] = 1;
8291         }
8292         if (tg3_test_link(tp) != 0) {
8293                 etest->flags |= ETH_TEST_FL_FAILED;
8294                 data[1] = 1;
8295         }
8296         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8297                 int err, irq_sync = 0;
8298
8299                 if (netif_running(dev)) {
8300                         tg3_netif_stop(tp);
8301                         irq_sync = 1;
8302                 }
8303
8304                 tg3_full_lock(tp, irq_sync);
8305
8306                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8307                 err = tg3_nvram_lock(tp);
8308                 tg3_halt_cpu(tp, RX_CPU_BASE);
8309                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8310                         tg3_halt_cpu(tp, TX_CPU_BASE);
8311                 if (!err)
8312                         tg3_nvram_unlock(tp);
8313
8314                 if (tg3_test_registers(tp) != 0) {
8315                         etest->flags |= ETH_TEST_FL_FAILED;
8316                         data[2] = 1;
8317                 }
8318                 if (tg3_test_memory(tp) != 0) {
8319                         etest->flags |= ETH_TEST_FL_FAILED;
8320                         data[3] = 1;
8321                 }
8322                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8323                         etest->flags |= ETH_TEST_FL_FAILED;
8324
8325                 tg3_full_unlock(tp);
8326
8327                 if (tg3_test_interrupt(tp) != 0) {
8328                         etest->flags |= ETH_TEST_FL_FAILED;
8329                         data[5] = 1;
8330                 }
8331
8332                 tg3_full_lock(tp, 0);
8333
8334                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8335                 if (netif_running(dev)) {
8336                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8337                         tg3_init_hw(tp);
8338                         tg3_netif_start(tp);
8339                 }
8340
8341                 tg3_full_unlock(tp);
8342         }
8343         if (tp->link_config.phy_is_low_power)
8344                 tg3_set_power_state(tp, PCI_D3hot);
8345
8346 }
8347
8348 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8349 {
8350         struct mii_ioctl_data *data = if_mii(ifr);
8351         struct tg3 *tp = netdev_priv(dev);
8352         int err;
8353
8354         switch(cmd) {
8355         case SIOCGMIIPHY:
8356                 data->phy_id = PHY_ADDR;
8357
8358                 /* fallthru */
8359         case SIOCGMIIREG: {
8360                 u32 mii_regval;
8361
8362                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8363                         break;                  /* We have no PHY */
8364
8365                 if (tp->link_config.phy_is_low_power)
8366                         return -EAGAIN;
8367
8368                 spin_lock_bh(&tp->lock);
8369                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8370                 spin_unlock_bh(&tp->lock);
8371
8372                 data->val_out = mii_regval;
8373
8374                 return err;
8375         }
8376
8377         case SIOCSMIIREG:
8378                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8379                         break;                  /* We have no PHY */
8380
8381                 if (!capable(CAP_NET_ADMIN))
8382                         return -EPERM;
8383
8384                 if (tp->link_config.phy_is_low_power)
8385                         return -EAGAIN;
8386
8387                 spin_lock_bh(&tp->lock);
8388                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8389                 spin_unlock_bh(&tp->lock);
8390
8391                 return err;
8392
8393         default:
8394                 /* do nothing */
8395                 break;
8396         }
8397         return -EOPNOTSUPP;
8398 }
8399
8400 #if TG3_VLAN_TAG_USED
8401 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8402 {
8403         struct tg3 *tp = netdev_priv(dev);
8404
8405         tg3_full_lock(tp, 0);
8406
8407         tp->vlgrp = grp;
8408
8409         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8410         __tg3_set_rx_mode(dev);
8411
8412         tg3_full_unlock(tp);
8413 }
8414
8415 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8416 {
8417         struct tg3 *tp = netdev_priv(dev);
8418
8419         tg3_full_lock(tp, 0);
8420         if (tp->vlgrp)
8421                 tp->vlgrp->vlan_devices[vid] = NULL;
8422         tg3_full_unlock(tp);
8423 }
8424 #endif
8425
8426 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8427 {
8428         struct tg3 *tp = netdev_priv(dev);
8429
8430         memcpy(ec, &tp->coal, sizeof(*ec));
8431         return 0;
8432 }
8433
8434 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8435 {
8436         struct tg3 *tp = netdev_priv(dev);
8437         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8438         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8439
8440         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8441                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8442                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8443                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8444                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8445         }
8446
8447         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8448             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8449             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8450             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8451             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8452             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8453             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8454             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8455             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8456             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8457                 return -EINVAL;
8458
8459         /* No rx interrupts will be generated if both are zero */
8460         if ((ec->rx_coalesce_usecs == 0) &&
8461             (ec->rx_max_coalesced_frames == 0))
8462                 return -EINVAL;
8463
8464         /* No tx interrupts will be generated if both are zero */
8465         if ((ec->tx_coalesce_usecs == 0) &&
8466             (ec->tx_max_coalesced_frames == 0))
8467                 return -EINVAL;
8468
8469         /* Only copy relevant parameters, ignore all others. */
8470         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8471         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8472         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8473         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8474         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8475         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8476         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8477         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8478         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8479
8480         if (netif_running(dev)) {
8481                 tg3_full_lock(tp, 0);
8482                 __tg3_set_coalesce(tp, &tp->coal);
8483                 tg3_full_unlock(tp);
8484         }
8485         return 0;
8486 }
8487
8488 static struct ethtool_ops tg3_ethtool_ops = {
8489         .get_settings           = tg3_get_settings,
8490         .set_settings           = tg3_set_settings,
8491         .get_drvinfo            = tg3_get_drvinfo,
8492         .get_regs_len           = tg3_get_regs_len,
8493         .get_regs               = tg3_get_regs,
8494         .get_wol                = tg3_get_wol,
8495         .set_wol                = tg3_set_wol,
8496         .get_msglevel           = tg3_get_msglevel,
8497         .set_msglevel           = tg3_set_msglevel,
8498         .nway_reset             = tg3_nway_reset,
8499         .get_link               = ethtool_op_get_link,
8500         .get_eeprom_len         = tg3_get_eeprom_len,
8501         .get_eeprom             = tg3_get_eeprom,
8502         .set_eeprom             = tg3_set_eeprom,
8503         .get_ringparam          = tg3_get_ringparam,
8504         .set_ringparam          = tg3_set_ringparam,
8505         .get_pauseparam         = tg3_get_pauseparam,
8506         .set_pauseparam         = tg3_set_pauseparam,
8507         .get_rx_csum            = tg3_get_rx_csum,
8508         .set_rx_csum            = tg3_set_rx_csum,
8509         .get_tx_csum            = ethtool_op_get_tx_csum,
8510         .set_tx_csum            = tg3_set_tx_csum,
8511         .get_sg                 = ethtool_op_get_sg,
8512         .set_sg                 = ethtool_op_set_sg,
8513 #if TG3_TSO_SUPPORT != 0
8514         .get_tso                = ethtool_op_get_tso,
8515         .set_tso                = tg3_set_tso,
8516 #endif
8517         .self_test_count        = tg3_get_test_count,
8518         .self_test              = tg3_self_test,
8519         .get_strings            = tg3_get_strings,
8520         .phys_id                = tg3_phys_id,
8521         .get_stats_count        = tg3_get_stats_count,
8522         .get_ethtool_stats      = tg3_get_ethtool_stats,
8523         .get_coalesce           = tg3_get_coalesce,
8524         .set_coalesce           = tg3_set_coalesce,
8525         .get_perm_addr          = ethtool_op_get_perm_addr,
8526 };
8527
8528 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8529 {
8530         u32 cursize, val;
8531
8532         tp->nvram_size = EEPROM_CHIP_SIZE;
8533
8534         if (tg3_nvram_read(tp, 0, &val) != 0)
8535                 return;
8536
8537         if (swab32(val) != TG3_EEPROM_MAGIC)
8538                 return;
8539
8540         /*
8541          * Size the chip by reading offsets at increasing powers of two.
8542          * When we encounter our validation signature, we know the addressing
8543          * has wrapped around, and thus have our chip size.
8544          */
8545         cursize = 0x800;
8546
8547         while (cursize < tp->nvram_size) {
8548                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8549                         return;
8550
8551                 if (swab32(val) == TG3_EEPROM_MAGIC)
8552                         break;
8553
8554                 cursize <<= 1;
8555         }
8556
8557         tp->nvram_size = cursize;
8558 }
8559                 
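     /* The NVRAM size is stored (in kilobytes) in the upper 16 bits of the
      * word at offset 0xf0; fall back to 128 KB (0x20000) if that word is
      * zero or cannot be read.
      */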
8560 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8561 {
8562         u32 val;
8563
8564         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8565                 if (val != 0) {
8566                         tp->nvram_size = (val >> 16) * 1024;
8567                         return;
8568                 }
8569         }
8570         tp->nvram_size = 0x20000;
8571 }
8572
8573 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8574 {
8575         u32 nvcfg1;
8576
8577         nvcfg1 = tr32(NVRAM_CFG1);
8578         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8579                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8580         }
8581         else {
8582                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8583                 tw32(NVRAM_CFG1, nvcfg1);
8584         }
8585
8586         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8587             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8588                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8589                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8590                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8591                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8592                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8593                                 break;
8594                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8595                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8596                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8597                                 break;
8598                         case FLASH_VENDOR_ATMEL_EEPROM:
8599                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8600                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8601                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8602                                 break;
8603                         case FLASH_VENDOR_ST:
8604                                 tp->nvram_jedecnum = JEDEC_ST;
8605                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8606                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8607                                 break;
8608                         case FLASH_VENDOR_SAIFUN:
8609                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8610                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8611                                 break;
8612                         case FLASH_VENDOR_SST_SMALL:
8613                         case FLASH_VENDOR_SST_LARGE:
8614                                 tp->nvram_jedecnum = JEDEC_SST;
8615                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8616                                 break;
8617                 }
8618         }
8619         else {
8620                 tp->nvram_jedecnum = JEDEC_ATMEL;
8621                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8622                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8623         }
8624 }
8625
8626 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8627 {
8628         u32 nvcfg1;
8629
8630         nvcfg1 = tr32(NVRAM_CFG1);
8631
8632         /* NVRAM protection for TPM */
8633         if (nvcfg1 & (1 << 27))
8634                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8635
8636         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8637                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8638                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8639                         tp->nvram_jedecnum = JEDEC_ATMEL;
8640                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8641                         break;
8642                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8643                         tp->nvram_jedecnum = JEDEC_ATMEL;
8644                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8645                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8646                         break;
8647                 case FLASH_5752VENDOR_ST_M45PE10:
8648                 case FLASH_5752VENDOR_ST_M45PE20:
8649                 case FLASH_5752VENDOR_ST_M45PE40:
8650                         tp->nvram_jedecnum = JEDEC_ST;
8651                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8652                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8653                         break;
8654         }
8655
8656         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8657                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8658                         case FLASH_5752PAGE_SIZE_256:
8659                                 tp->nvram_pagesize = 256;
8660                                 break;
8661                         case FLASH_5752PAGE_SIZE_512:
8662                                 tp->nvram_pagesize = 512;
8663                                 break;
8664                         case FLASH_5752PAGE_SIZE_1K:
8665                                 tp->nvram_pagesize = 1024;
8666                                 break;
8667                         case FLASH_5752PAGE_SIZE_2K:
8668                                 tp->nvram_pagesize = 2048;
8669                                 break;
8670                         case FLASH_5752PAGE_SIZE_4K:
8671                                 tp->nvram_pagesize = 4096;
8672                                 break;
8673                         case FLASH_5752PAGE_SIZE_264:
8674                                 tp->nvram_pagesize = 264;
8675                                 break;
8676                 }
8677         }
8678         else {
8679                 /* For eeprom, set pagesize to maximum eeprom size */
8680                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8681
8682                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8683                 tw32(NVRAM_CFG1, nvcfg1);
8684         }
8685 }
8686
8687 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8688 static void __devinit tg3_nvram_init(struct tg3 *tp)
8689 {
8690         int j;
8691
8692         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8693                 return;
8694
8695         tw32_f(GRC_EEPROM_ADDR,
8696              (EEPROM_ADDR_FSM_RESET |
8697               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8698                EEPROM_ADDR_CLKPERD_SHIFT)));
8699
8700         /* XXX schedule_timeout() ... */
8701         for (j = 0; j < 100; j++)
8702                 udelay(10);
8703
8704         /* Enable seeprom accesses. */
8705         tw32_f(GRC_LOCAL_CTRL,
8706              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8707         udelay(100);
8708
8709         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8710             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8711                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8712
8713                 if (tg3_nvram_lock(tp)) {
8714                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8715                                "tg3_nvram_init failed.\n", tp->dev->name);
8716                         return;
8717                 }
8718                 tg3_enable_nvram_access(tp);
8719
8720                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8721                         tg3_get_5752_nvram_info(tp);
8722                 else
8723                         tg3_get_nvram_info(tp);
8724
8725                 tg3_get_nvram_size(tp);
8726
8727                 tg3_disable_nvram_access(tp);
8728                 tg3_nvram_unlock(tp);
8729
8730         } else {
8731                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8732
8733                 tg3_get_eeprom_size(tp);
8734         }
8735 }
8736
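     /* Read one 32-bit word through the GRC EEPROM state machine, used
      * when the chip has no NVRAM interface: program the address with the
      * READ and START bits set, poll EEPROM_ADDR_COMPLETE for up to one
      * second, then fetch the result from GRC_EEPROM_DATA.
      */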
8737 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8738                                         u32 offset, u32 *val)
8739 {
8740         u32 tmp;
8741         int i;
8742
8743         if (offset > EEPROM_ADDR_ADDR_MASK ||
8744             (offset % 4) != 0)
8745                 return -EINVAL;
8746
8747         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8748                                         EEPROM_ADDR_DEVID_MASK |
8749                                         EEPROM_ADDR_READ);
8750         tw32(GRC_EEPROM_ADDR,
8751              tmp |
8752              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8753              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8754               EEPROM_ADDR_ADDR_MASK) |
8755              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8756
8757         for (i = 0; i < 10000; i++) {
8758                 tmp = tr32(GRC_EEPROM_ADDR);
8759
8760                 if (tmp & EEPROM_ADDR_COMPLETE)
8761                         break;
8762                 udelay(100);
8763         }
8764         if (!(tmp & EEPROM_ADDR_COMPLETE))
8765                 return -EBUSY;
8766
8767         *val = tr32(GRC_EEPROM_DATA);
8768         return 0;
8769 }
8770
8771 #define NVRAM_CMD_TIMEOUT 10000
8772
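     /* Issue a command to the NVRAM interface and poll NVRAM_CMD_DONE
      * every 10 us, for up to NVRAM_CMD_TIMEOUT iterations, returning
      * -EBUSY on timeout.
      */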
8773 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8774 {
8775         int i;
8776
8777         tw32(NVRAM_CMD, nvram_cmd);
8778         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8779                 udelay(10);
8780                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8781                         udelay(10);
8782                         break;
8783                 }
8784         }
8785         if (i == NVRAM_CMD_TIMEOUT) {
8786                 return -EBUSY;
8787         }
8788         return 0;
8789 }
8790
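     /* Read a 32-bit word from NVRAM.  Sun 570X parts have no NVRAM, and
      * chips without TG3_FLAG_NVRAM use the slower EEPROM path.  Buffered
      * Atmel flash needs the linear offset converted to the part's
      * page/byte addressing before the read command is issued; the result
      * is byte-swapped from the NVRAM data register.
      */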
8791 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8792 {
8793         int ret;
8794
8795         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8796                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8797                 return -EINVAL;
8798         }
8799
8800         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8801                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8802
8803         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8804                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8805                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8806
8807                 offset = ((offset / tp->nvram_pagesize) <<
8808                           ATMEL_AT45DB0X1B_PAGE_POS) +
8809                         (offset % tp->nvram_pagesize);
8810         }
8811
8812         if (offset > NVRAM_ADDR_MSK)
8813                 return -EINVAL;
8814
8815         ret = tg3_nvram_lock(tp);
8816         if (ret)
8817                 return ret;
8818
8819         tg3_enable_nvram_access(tp);
8820
8821         tw32(NVRAM_ADDR, offset);
8822         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8823                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8824
8825         if (ret == 0)
8826                 *val = swab32(tr32(NVRAM_RDDATA));
8827
8828         tg3_disable_nvram_access(tp);
8829
8830         tg3_nvram_unlock(tp);
8831
8832         return ret;
8833 }
8834
8835 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8836                                     u32 offset, u32 len, u8 *buf)
8837 {
8838         int i, j, rc = 0;
8839         u32 val;
8840
8841         for (i = 0; i < len; i += 4) {
8842                 u32 addr, data;
8843
8844                 addr = offset + i;
8845
8846                 memcpy(&data, buf + i, 4);
8847
8848                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8849
8850                 val = tr32(GRC_EEPROM_ADDR);
8851                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8852
8853                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8854                         EEPROM_ADDR_READ);
8855                 tw32(GRC_EEPROM_ADDR, val |
8856                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8857                         (addr & EEPROM_ADDR_ADDR_MASK) |
8858                         EEPROM_ADDR_START |
8859                         EEPROM_ADDR_WRITE);
8860                 
8861                 for (j = 0; j < 10000; j++) {
8862                         val = tr32(GRC_EEPROM_ADDR);
8863
8864                         if (val & EEPROM_ADDR_COMPLETE)
8865                                 break;
8866                         udelay(100);
8867                 }
8868                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8869                         rc = -EBUSY;
8870                         break;
8871                 }
8872         }
8873
8874         return rc;
8875 }
8876
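     /* Write to flash parts that have no on-chip write buffer: read the
      * whole page into a temporary buffer, merge in the new data, issue a
      * write-enable, erase the page, then program it back one word at a
      * time with FIRST/LAST framing, finishing with a write-disable
      * command.
      */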
8877 /* offset and length are dword aligned */
8878 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8879                 u8 *buf)
8880 {
8881         int ret = 0;
8882         u32 pagesize = tp->nvram_pagesize;
8883         u32 pagemask = pagesize - 1;
8884         u32 nvram_cmd;
8885         u8 *tmp;
8886
8887         tmp = kmalloc(pagesize, GFP_KERNEL);
8888         if (tmp == NULL)
8889                 return -ENOMEM;
8890
8891         while (len) {
8892                 int j;
8893                 u32 phy_addr, page_off, size;
8894
8895                 phy_addr = offset & ~pagemask;
8896         
8897                 for (j = 0; j < pagesize; j += 4) {
8898                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8899                                                 (u32 *) (tmp + j))))
8900                                 break;
8901                 }
8902                 if (ret)
8903                         break;
8904
8905                 page_off = offset & pagemask;
8906                 size = pagesize;
8907                 if (len < size)
8908                         size = len;
8909
8910                 len -= size;
8911
8912                 memcpy(tmp + page_off, buf, size);
8913
8914                 offset = offset + (pagesize - page_off);
8915
8916                 tg3_enable_nvram_access(tp);
8917
8918                 /*
8919                  * Before we can erase the flash page, we need
8920                  * to issue a special "write enable" command.
8921                  */
8922                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8923
8924                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8925                         break;
8926
8927                 /* Erase the target page */
8928                 tw32(NVRAM_ADDR, phy_addr);
8929
8930                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8931                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8932
8933                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8934                         break;
8935
8936                 /* Issue another write enable to start the write. */
8937                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8938
8939                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8940                         break;
8941
8942                 for (j = 0; j < pagesize; j += 4) {
8943                         u32 data;
8944
8945                         data = *((u32 *) (tmp + j));
8946                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8947
8948                         tw32(NVRAM_ADDR, phy_addr + j);
8949
8950                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8951                                 NVRAM_CMD_WR;
8952
8953                         if (j == 0)
8954                                 nvram_cmd |= NVRAM_CMD_FIRST;
8955                         else if (j == (pagesize - 4))
8956                                 nvram_cmd |= NVRAM_CMD_LAST;
8957
8958                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8959                                 break;
8960                 }
8961                 if (ret)
8962                         break;
8963         }
8964
8965         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8966         tg3_nvram_exec_cmd(tp, nvram_cmd);
8967
8968         kfree(tmp);
8969
8970         return ret;
8971 }
8972
8973 /* offset and length are dword aligned */
8974 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8975                 u8 *buf)
8976 {
8977         int i, ret = 0;
8978
8979         for (i = 0; i < len; i += 4, offset += 4) {
8980                 u32 data, page_off, phy_addr, nvram_cmd;
8981
8982                 memcpy(&data, buf + i, 4);
8983                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8984
8985                 page_off = offset % tp->nvram_pagesize;
8986
8987                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8988                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8989
8990                         phy_addr = ((offset / tp->nvram_pagesize) <<
8991                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8992                 }
8993                 else {
8994                         phy_addr = offset;
8995                 }
8996
8997                 tw32(NVRAM_ADDR, phy_addr);
8998
8999                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9000
9001                 if ((page_off == 0) || (i == 0))
9002                         nvram_cmd |= NVRAM_CMD_FIRST;
9003                 else if (page_off == (tp->nvram_pagesize - 4))
9004                         nvram_cmd |= NVRAM_CMD_LAST;
9005
9006                 if (i == (len - 4))
9007                         nvram_cmd |= NVRAM_CMD_LAST;
9008
9009                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9010                     (tp->nvram_jedecnum == JEDEC_ST) &&
9011                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9012
9013                         if ((ret = tg3_nvram_exec_cmd(tp,
9014                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9015                                 NVRAM_CMD_DONE)))
9016
9017                                 break;
9018                 }
9019                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9020                         /* We always do complete word writes to eeprom. */
9021                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9022                 }
9023
9024                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9025                         break;
9026         }
9027         return ret;
9028 }
9029
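     /* Top-level NVRAM write: temporarily release the EEPROM write-protect
      * GPIO if it is asserted, then dispatch to the EEPROM path or to the
      * buffered/unbuffered flash writers with GRC_MODE_NVRAM_WR_ENABLE set
      * around the operation, and finally restore write protection.
      */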
9030 /* offset and length are dword aligned */
9031 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9032 {
9033         int ret;
9034
9035         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9036                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9037                 return -EINVAL;
9038         }
9039
9040         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9041                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9042                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9043                 udelay(40);
9044         }
9045
9046         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9047                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9048         }
9049         else {
9050                 u32 grc_mode;
9051
9052                 ret = tg3_nvram_lock(tp);
9053                 if (ret)
9054                         return ret;
9055
9056                 tg3_enable_nvram_access(tp);
9057                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9058                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9059                         tw32(NVRAM_WRITE1, 0x406);
9060
9061                 grc_mode = tr32(GRC_MODE);
9062                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9063
9064                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9065                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9066
9067                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9068                                 buf);
9069                 }
9070                 else {
9071                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9072                                 buf);
9073                 }
9074
9075                 grc_mode = tr32(GRC_MODE);
9076                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9077
9078                 tg3_disable_nvram_access(tp);
9079                 tg3_nvram_unlock(tp);
9080         }
9081
9082         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9083                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9084                 udelay(40);
9085         }
9086
9087         return ret;
9088 }
9089
9090 struct subsys_tbl_ent {
9091         u16 subsys_vendor, subsys_devid;
9092         u32 phy_id;
9093 };
9094
9095 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9096         /* Broadcom boards. */
9097         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9098         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9099         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9100         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9101         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9102         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9103         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9104         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9105         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9106         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9107         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9108
9109         /* 3com boards. */
9110         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9111         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9112         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9113         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9114         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9115
9116         /* DELL boards. */
9117         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9118         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9119         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9120         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9121
9122         /* Compaq boards. */
9123         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9124         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9125         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9126         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9127         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9128
9129         /* IBM boards. */
9130         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9131 };
9132
9133 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9134 {
9135         int i;
9136
9137         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9138                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9139                      tp->pdev->subsystem_vendor) &&
9140                     (subsys_id_to_phy_id[i].subsys_devid ==
9141                      tp->pdev->subsystem_device))
9142                         return &subsys_id_to_phy_id[i];
9143         }
9144         return NULL;
9145 }
9146
9147 /* Since this function may be called in D3-hot power state during
9148  * tg3_init_one(), only config cycles are allowed.
9149  */
9150 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9151 {
9152         u32 val;
9153
9154         /* Make sure register accesses (indirect or otherwise)
9155          * will function correctly.
9156          */
9157         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9158                                tp->misc_host_ctrl);
9159
9160         tp->phy_id = PHY_ID_INVALID;
9161         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9162
9163         /* Do not even try poking around in here on Sun parts.  */
9164         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9165                 return;
9166
9167         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9168         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9169                 u32 nic_cfg, led_cfg;
9170                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9171                 int eeprom_phy_serdes = 0;
9172
9173                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9174                 tp->nic_sram_data_cfg = nic_cfg;
9175
9176                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9177                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9178                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9179                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9180                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9181                     (ver > 0) && (ver < 0x100))
9182                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9183
9184                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9185                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9186                         eeprom_phy_serdes = 1;
9187
9188                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9189                 if (nic_phy_id != 0) {
9190                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9191                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9192
9193                         eeprom_phy_id  = (id1 >> 16) << 10;
9194                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9195                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9196                 } else
9197                         eeprom_phy_id = 0;
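                     /* The packing above mirrors the layout that
                      * tg3_phy_probe() builds from MII_PHYSID1 and
                      * MII_PHYSID2, so tp->phy_id has one consistent
                      * format regardless of where it came from.
                      */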
9198
9199                 tp->phy_id = eeprom_phy_id;
9200                 if (eeprom_phy_serdes) {
9201                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9202                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9203                         else
9204                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9205                 }
9206
9207                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9208                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9209                                     SHASTA_EXT_LED_MODE_MASK);
9210                 else
9211                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9212
9213                 switch (led_cfg) {
9214                 default:
9215                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9216                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9217                         break;
9218
9219                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9220                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9221                         break;
9222
9223                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9224                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9225
9226                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9227                          * read on some older 5700/5701 bootcode.
9228                          */
9229                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9230                             ASIC_REV_5700 ||
9231                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9232                             ASIC_REV_5701)
9233                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9234
9235                         break;
9236
9237                 case SHASTA_EXT_LED_SHARED:
9238                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9239                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9240                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9241                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9242                                                  LED_CTRL_MODE_PHY_2);
9243                         break;
9244
9245                 case SHASTA_EXT_LED_MAC:
9246                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9247                         break;
9248
9249                 case SHASTA_EXT_LED_COMBO:
9250                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9251                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9252                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9253                                                  LED_CTRL_MODE_PHY_2);
9254                         break;
9255
9256                 };
9257
9258                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9259                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9260                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9261                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9262
9263                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9264                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9265                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9266                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9267
9268                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9269                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9270                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9271                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9272                 }
9273                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9274                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9275
9276                 if (cfg2 & (1 << 17))
9277                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9278
9279                 /* SerDes signal pre-emphasis in register 0x590 is set by
9280                  * the bootcode if bit 18 is set. */
9281                 if (cfg2 & (1 << 18))
9282                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9283         }
9284 }
9285
9286 static int __devinit tg3_phy_probe(struct tg3 *tp)
9287 {
9288         u32 hw_phy_id_1, hw_phy_id_2;
9289         u32 hw_phy_id, hw_phy_id_masked;
9290         int err;
9291
9292         /* Reading the PHY ID register can conflict with ASF
9293          * firmware access to the PHY hardware.
9294          */
9295         err = 0;
9296         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9297                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9298         } else {
9299                 /* Now read the physical PHY_ID from the chip and verify
9300                  * that it is sane.  If it doesn't look good, we fall back
9301                  * to the PHY_ID found in the eeprom area or, failing that,
9302                  * to the hard-coded subsystem device table.
9303                  */
9304                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9305                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9306
9307                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9308                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9309                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9310
9311                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9312         }
9313
9314         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9315                 tp->phy_id = hw_phy_id;
9316                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9317                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9318                 else
9319                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9320         } else {
9321                 if (tp->phy_id != PHY_ID_INVALID) {
9322                         /* Do nothing, phy ID already set up in
9323                          * tg3_get_eeprom_hw_cfg().
9324                          */
9325                 } else {
9326                         struct subsys_tbl_ent *p;
9327
9328                         /* No eeprom signature?  Try the hardcoded
9329                          * subsys device table.
9330                          */
9331                         p = lookup_by_subsys(tp);
9332                         if (!p)
9333                                 return -ENODEV;
9334
9335                         tp->phy_id = p->phy_id;
9336                         if (!tp->phy_id ||
9337                             tp->phy_id == PHY_ID_BCM8002)
9338                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9339                 }
9340         }
9341
9342         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9343             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9344                 u32 bmsr, adv_reg, tg3_ctrl;
9345
9346                 tg3_readphy(tp, MII_BMSR, &bmsr);
9347                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9348                     (bmsr & BMSR_LSTATUS))
9349                         goto skip_phy_reset;
9350
9351                 err = tg3_phy_reset(tp);
9352                 if (err)
9353                         return err;
9354
9355                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9356                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9357                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9358                 tg3_ctrl = 0;
9359                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9360                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9361                                     MII_TG3_CTRL_ADV_1000_FULL);
9362                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9363                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9364                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9365                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9366                 }
9367
9368                 if (!tg3_copper_is_advertising_all(tp)) {
9369                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9370
9371                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9372                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9373
9374                         tg3_writephy(tp, MII_BMCR,
9375                                      BMCR_ANENABLE | BMCR_ANRESTART);
9376                 }
9377                 tg3_phy_set_wirespeed(tp);
9378
9379                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9380                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9381                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9382         }
9383
9384 skip_phy_reset:
9385         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9386                 err = tg3_init_5401phy_dsp(tp);
9387                 if (err)
9388                         return err;
9389         }
9390
9391         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9392                 err = tg3_init_5401phy_dsp(tp);
9393         }
9394
9395         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9396                 tp->link_config.advertising =
9397                         (ADVERTISED_1000baseT_Half |
9398                          ADVERTISED_1000baseT_Full |
9399                          ADVERTISED_Autoneg |
9400                          ADVERTISED_FIBRE);
9401         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9402                 tp->link_config.advertising &=
9403                         ~(ADVERTISED_1000baseT_Half |
9404                           ADVERTISED_1000baseT_Full);
9405
9406         return err;
9407 }
9408
9409 static void __devinit tg3_read_partno(struct tg3 *tp)
9410 {
9411         unsigned char vpd_data[256];
9412         int i;
9413
9414         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9415                 /* Sun decided not to put the necessary bits in the
9416                  * NVRAM of their onboard tg3 parts :(
9417                  */
9418                 strcpy(tp->board_part_number, "Sun 570X");
9419                 return;
9420         }
9421
9422         for (i = 0; i < 256; i += 4) {
9423                 u32 tmp;
9424
9425                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9426                         goto out_not_found;
9427
9428                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9429                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9430                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9431                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9432         }
9433
9434         /* Now parse and find the part number. */
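             /* The loop below walks the standard PCI VPD large-resource
              * tags: 0x82 (identifier string) and 0x91 (read/write data)
              * are skipped, and the 0x90 (read-only data) block is
              * searched for the "PN" part-number keyword.
              */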
9435         for (i = 0; i < 256; ) {
9436                 unsigned char val = vpd_data[i];
9437                 int block_end;
9438
9439                 if (val == 0x82 || val == 0x91) {
9440                         i = (i + 3 +
9441                              (vpd_data[i + 1] +
9442                               (vpd_data[i + 2] << 8)));
9443                         continue;
9444                 }
9445
9446                 if (val != 0x90)
9447                         goto out_not_found;
9448
9449                 block_end = (i + 3 +
9450                              (vpd_data[i + 1] +
9451                               (vpd_data[i + 2] << 8)));
9452                 i += 3;
9453                 while (i < block_end) {
9454                         if (vpd_data[i + 0] == 'P' &&
9455                             vpd_data[i + 1] == 'N') {
9456                                 int partno_len = vpd_data[i + 2];
9457
9458                                 if (partno_len > 24)
9459                                         goto out_not_found;
9460
9461                                 memcpy(tp->board_part_number,
9462                                        &vpd_data[i + 3],
9463                                        partno_len);
9464
9465                                 /* Success. */
9466                                 return;
9467                         }
                             /* Advance past keyword(2) + len(1) + data,
                              * otherwise the loop never makes progress.
                              */
                             i += 3 + vpd_data[i + 2];
9468                 }
9469
9470                 /* Part number not found. */
9471                 goto out_not_found;
9472         }
9473
9474 out_not_found:
9475         strcpy(tp->board_part_number, "none");
9476 }
9477
9478 #ifdef CONFIG_SPARC64
9479 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9480 {
9481         struct pci_dev *pdev = tp->pdev;
9482         struct pcidev_cookie *pcp = pdev->sysdata;
9483
9484         if (pcp != NULL) {
9485                 int node = pcp->prom_node;
9486                 u32 venid;
9487                 int err;
9488
9489                 err = prom_getproperty(node, "subsystem-vendor-id",
9490                                        (char *) &venid, sizeof(venid));
9491                 if (err == 0 || err == -1)
9492                         return 0;
9493                 if (venid == PCI_VENDOR_ID_SUN)
9494                         return 1;
9495
9496                 /* TG3 chips onboard the SunBlade-2500 don't have the
9497                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9498                  * are distinguishable from non-Sun variants by being
9499                  * named "network" by the firmware.  Non-Sun cards will
9500                  * show up as being named "ethernet".
9501                  */
9502                 if (!strcmp(pcp->prom_name, "network"))
9503                         return 1;
9504         }
9505         return 0;
9506 }
9507 #endif
9508
9509 static int __devinit tg3_get_invariants(struct tg3 *tp)
9510 {
9511         static struct pci_device_id write_reorder_chipsets[] = {
9512                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9513                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9514                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9515                              PCI_DEVICE_ID_VIA_8385_0) },
9516                 { },
9517         };
9518         u32 misc_ctrl_reg;
9519         u32 cacheline_sz_reg;
9520         u32 pci_state_reg, grc_misc_cfg;
9521         u32 val;
9522         u16 pci_cmd;
9523         int err;
9524
9525 #ifdef CONFIG_SPARC64
9526         if (tg3_is_sun_570X(tp))
9527                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9528 #endif
9529
9530         /* Force memory write invalidate off.  If we leave it on,
9531          * then on 5700_BX chips we have to enable a workaround.
9532          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9533          * to match the cacheline size.  The Broadcom driver has this
9534          * workaround but turns MWI off all the time and so never uses
9535          * it.  This seems to suggest that the workaround is insufficient.
9536          */
9537         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9538         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9539         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9540
9541         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9542          * has the register indirect write enable bit set before
9543          * we try to access any of the MMIO registers.  It is also
9544          * critical that the PCI-X hw workaround situation is decided
9545          * before that point.
9546          */
9547         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9548                               &misc_ctrl_reg);
9549
9550         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9551                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9552
9553         /* Wrong chip ID in 5752 A0. This code can be removed later
9554          * as A0 is not in production.
9555          */
9556         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9557                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9558
9559         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9560          * we need to disable memory and use config. cycles
9561          * only to access all registers. The 5702/03 chips
9562          * can mistakenly decode the special cycles from the
9563          * ICH chipsets as memory write cycles, causing corruption
9564          * of register and memory space. Only certain ICH bridges
9565          * will drive special cycles with non-zero data during the
9566          * address phase which can fall within the 5703's address
9567          * range. This is not an ICH bug as the PCI spec allows
9568          * non-zero address during special cycles. However, only
9569          * these ICH bridges are known to drive non-zero addresses
9570          * during special cycles.
9571          *
9572          * Since special cycles do not cross PCI bridges, we only
9573          * enable this workaround if the 5703 is on the secondary
9574          * bus of these ICH bridges.
9575          */
9576         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9577             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9578                 static struct tg3_dev_id {
9579                         u32     vendor;
9580                         u32     device;
9581                         u32     rev;
9582                 } ich_chipsets[] = {
9583                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9584                           PCI_ANY_ID },
9585                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9586                           PCI_ANY_ID },
9587                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9588                           0xa },
9589                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9590                           PCI_ANY_ID },
9591                         { },
9592                 };
9593                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9594                 struct pci_dev *bridge = NULL;
9595
9596                 while (pci_id->vendor != 0) {
9597                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9598                                                 bridge);
9599                         if (!bridge) {
9600                                 pci_id++;
9601                                 continue;
9602                         }
9603                         if (pci_id->rev != PCI_ANY_ID) {
9604                                 u8 rev;
9605
9606                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9607                                                      &rev);
9608                                 if (rev > pci_id->rev)
9609                                         continue;
9610                         }
9611                         if (bridge->subordinate &&
9612                             (bridge->subordinate->number ==
9613                              tp->pdev->bus->number)) {
9614
9615                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9616                                 pci_dev_put(bridge);
9617                                 break;
9618                         }
9619                 }
9620         }
9621
9622         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9623          * DMA addresses > 40-bit. This bridge may have other additional
9624          * 57xx devices behind it in some 4-port NIC designs for example.
9625          * Any tg3 device found behind the bridge will also need the 40-bit
9626          * DMA workaround.
9627          */
9628         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9629             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9630                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9631                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9632                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9633         }
9634         else {
9635                 struct pci_dev *bridge = NULL;
9636
9637                 do {
9638                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9639                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
9640                                                 bridge);
9641                         if (bridge && bridge->subordinate &&
9642                             (bridge->subordinate->number <=
9643                              tp->pdev->bus->number) &&
9644                             (bridge->subordinate->subordinate >=
9645                              tp->pdev->bus->number)) {
9646                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9647                                 pci_dev_put(bridge);
9648                                 break;
9649                         }
9650                 } while (bridge);
9651         }
9652
9653         /* Initialize misc host control in PCI block. */
9654         tp->misc_host_ctrl |= (misc_ctrl_reg &
9655                                MISC_HOST_CTRL_CHIPREV);
9656         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9657                                tp->misc_host_ctrl);
9658
9659         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9660                               &cacheline_sz_reg);
9661
9662         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9663         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9664         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9665         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9666
9667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9668             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9669             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9670                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9671
9672         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9673             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9674                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9675
9676         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9677                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9678
9679         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9680             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9681             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9682                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9683
9684         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9685                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9686
9687         /* If we have an AMD 762 or VIA K8T800 chipset, write
9688          * reordering to the mailbox registers done by the host
9689          * controller can cause major troubles.  We read back from
9690          * every mailbox register write to force the writes to be
9691          * posted to the chip in order.
9692          */
9693         if (pci_dev_present(write_reorder_chipsets) &&
9694             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9695                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9696
9697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9698             tp->pci_lat_timer < 64) {
9699                 tp->pci_lat_timer = 64;
9700
9701                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9702                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9703                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9704                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9705
9706                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9707                                        cacheline_sz_reg);
9708         }
9709
9710         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9711                               &pci_state_reg);
9712
9713         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9714                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9715
9716                 /* If this is a 5700 BX chipset, and we are in PCI-X
9717                  * mode, enable register write workaround.
9718                  *
9719                  * The workaround is to use indirect register accesses
9720                  * for all chip writes not to mailbox registers.
9721                  */
9722                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9723                         u32 pm_reg;
9724                         u16 pci_cmd;
9725
9726                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9727
9728                         /* The chip can have its power management PCI config
9729                          * space registers clobbered due to this bug.
9730                          * So explicitly force the chip into D0 here.
9731                          */
9732                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9733                                               &pm_reg);
9734                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9735                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9736                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9737                                                pm_reg);
9738
9739                         /* Also, force SERR#/PERR# in PCI command. */
9740                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9741                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9742                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9743                 }
9744         }
9745
9746         /* 5700 BX chips need to have their TX producer index mailboxes
9747          * written twice to workaround a bug.
9748          */
9749         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9750                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9751
9752         /* Back to back register writes can cause problems on this chip,
9753          * the workaround is to read back all reg writes except those to
9754          * mailbox regs.  See tg3_write_indirect_reg32().
9755          *
9756          * PCI Express 5750_A0 rev chips need this workaround too.
9757          */
9758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9759             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9760              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9761                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9762
9763         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9764                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9765         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9766                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9767
9768         /* Chip-specific fixup from Broadcom driver */
9769         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9770             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9771                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9772                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9773         }
9774
9775         /* Default fast path register access methods */
9776         tp->read32 = tg3_read32;
9777         tp->write32 = tg3_write32;
9778         tp->read32_mbox = tg3_read32;
9779         tp->write32_mbox = tg3_write32;
9780         tp->write32_tx_mbox = tg3_write32;
9781         tp->write32_rx_mbox = tg3_write32;
9782
9783         /* Various workaround register access methods */
9784         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9785                 tp->write32 = tg3_write_indirect_reg32;
9786         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9787                 tp->write32 = tg3_write_flush_reg32;
9788
9789         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9790             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9791                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9792                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9793                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9794         }
9795
9796         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9797                 tp->read32 = tg3_read_indirect_reg32;
9798                 tp->write32 = tg3_write_indirect_reg32;
9799                 tp->read32_mbox = tg3_read_indirect_mbox;
9800                 tp->write32_mbox = tg3_write_indirect_mbox;
9801                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9802                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9803
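                     /* With the ICH workaround active every register and
                      * mailbox access goes through config space, so the
                      * MMIO mapping is dropped and memory decode is
                      * disabled below.
                      */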
9804                 iounmap(tp->regs);
9805                 tp->regs = NULL;
9806
9807                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9808                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9809                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9810         }
9811
9812         /* Get eeprom hw config before calling tg3_set_power_state().
9813          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9814          * determined before calling tg3_set_power_state() so that
9815          * we know whether or not to switch out of Vaux power.
9816          * When the flag is set, it means that GPIO1 is used for eeprom
9817          * write protect and also implies that it is a LOM where GPIOs
9818          * are not used to switch power.
9819          */ 
9820         tg3_get_eeprom_hw_cfg(tp);
9821
9822         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9823          * GPIO1 driven high will bring 5700's external PHY out of reset.
9824          * It is also used as eeprom write protect on LOMs.
9825          */
9826         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9827         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9828             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9829                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9830                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9831         /* Unused GPIO3 must be driven as output on 5752 because there
9832          * are no pull-up resistors on unused GPIO pins.
9833          */
9834         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9835                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9836
9837         /* Force the chip into D0. */
9838         err = tg3_set_power_state(tp, PCI_D0);
9839         if (err) {
9840                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9841                        pci_name(tp->pdev));
9842                 return err;
9843         }
9844
9845         /* 5700 B0 chips do not support checksumming correctly due
9846          * to hardware bugs.
9847          */
9848         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9849                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9850
9851         /* Pseudo-header checksum is done by hardware logic and not
9852          * the offload processors, so make the chip do the pseudo-
9853          * header checksums on receive.  For transmit it is more
9854          * convenient to do the pseudo-header checksum in software
9855          * as Linux does that on transmit for us in all cases.
9856          */
9857         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9858         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9859
9860         /* Derive initial jumbo mode from MTU assigned in
9861          * ether_setup() via the alloc_etherdev() call
9862          */
9863         if (tp->dev->mtu > ETH_DATA_LEN &&
9864             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9865                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9866
9867         /* Determine WakeOnLan speed to use. */
9868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9869             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9870             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9871             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9872                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9873         } else {
9874                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9875         }
9876
9877         /* A few boards don't want Ethernet@WireSpeed phy feature */
9878         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9879             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9880              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9881              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9882             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9883                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9884
9885         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9886             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9887                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9888         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9889                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9890
9891         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9892                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9893
9894         tp->coalesce_mode = 0;
9895         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9896             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9897                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9898
9899         /* Initialize MAC MI mode, polling disabled. */
9900         tw32_f(MAC_MI_MODE, tp->mi_mode);
9901         udelay(80);
9902
9903         /* Initialize data/descriptor byte/word swapping. */
9904         val = tr32(GRC_MODE);
9905         val &= GRC_MODE_HOST_STACKUP;
9906         tw32(GRC_MODE, val | tp->grc_mode);
9907
9908         tg3_switch_clocks(tp);
9909
9910         /* Clear this out for sanity. */
9911         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9912
9913         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9914                               &pci_state_reg);
9915         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9916             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9917                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9918
9919                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9920                     chiprevid == CHIPREV_ID_5701_B0 ||
9921                     chiprevid == CHIPREV_ID_5701_B2 ||
9922                     chiprevid == CHIPREV_ID_5701_B5) {
9923                         void __iomem *sram_base;
9924
9925                         /* Write some dummy words into the SRAM status block
9926                          * area and see if it reads back correctly.  If the return
9927                          * value is bad, force-enable the PCI-X workaround.
9928                          */
9929                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9930
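                             /* Writing 0xffffffff to the adjacent word at
                              * sram_base + 4 must not disturb the zero just
                              * written at sram_base; if it does, the
                              * target-write bug is present.
                              */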
9931                         writel(0x00000000, sram_base);
9932                         writel(0x00000000, sram_base + 4);
9933                         writel(0xffffffff, sram_base + 4);
9934                         if (readl(sram_base) != 0x00000000)
9935                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9936                 }
9937         }
9938
9939         udelay(50);
9940         tg3_nvram_init(tp);
9941
9942         grc_misc_cfg = tr32(GRC_MISC_CFG);
9943         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9944
9945         /* Broadcom's driver says that CIOBE multisplit has a bug */
9946 #if 0
9947         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9948             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9949                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9950                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9951         }
9952 #endif
9953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9954             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9955              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9956                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9957
9958         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9959             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9960                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9961         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9962                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9963                                       HOSTCC_MODE_CLRTICK_TXBD);
9964
9965                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9966                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9967                                        tp->misc_host_ctrl);
9968         }
9969
9970         /* these are limited to 10/100 only */
9971         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9972              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9973             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9974              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9975              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9976               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9977               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9978             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9979              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9980               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9981                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9982
9983         err = tg3_phy_probe(tp);
9984         if (err) {
9985                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9986                        pci_name(tp->pdev), err);
9987                 /* ... but do not return immediately ... */
9988         }
9989
9990         tg3_read_partno(tp);
9991
9992         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9993                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9994         } else {
9995                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9996                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9997                 else
9998                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9999         }
10000
10001         /* 5700 {AX,BX} chips have a broken status block link
10002          * change bit implementation, so we must use the
10003          * status register in those cases.
10004          */
10005         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10006                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10007         else
10008                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10009
10010         /* The led_ctrl is set during tg3_get_eeprom_hw_cfg(); here we might
10011          * have to force the link status polling mechanism based
10012          * upon subsystem IDs.
10013          */
10014         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10015             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10016                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10017                                   TG3_FLAG_USE_LINKCHG_REG);
10018         }
10019
10020         /* For all SERDES we poll the MAC status register. */
10021         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10022                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10023         else
10024                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10025
10026         /* It seems all chips can get confused if TX buffers
10027          * straddle the 4GB address boundary in some cases.
10028          */
10029         tp->dev->hard_start_xmit = tg3_start_xmit;
10030
10031         tp->rx_offset = 2;
10032         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10033             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10034                 tp->rx_offset = 0;
10035
10036         /* By default, disable wake-on-lan.  User can change this
10037          * using ETHTOOL_SWOL.
10038          */
10039         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10040
10041         return err;
10042 }
10043
10044 #ifdef CONFIG_SPARC64
10045 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10046 {
10047         struct net_device *dev = tp->dev;
10048         struct pci_dev *pdev = tp->pdev;
10049         struct pcidev_cookie *pcp = pdev->sysdata;
10050
10051         if (pcp != NULL) {
10052                 int node = pcp->prom_node;
10053
10054                 if (prom_getproplen(node, "local-mac-address") == 6) {
10055                         prom_getproperty(node, "local-mac-address",
10056                                          dev->dev_addr, 6);
10057                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10058                         return 0;
10059                 }
10060         }
10061         return -ENODEV;
10062 }
10063
10064 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10065 {
10066         struct net_device *dev = tp->dev;
10067
10068         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10069         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10070         return 0;
10071 }
10072 #endif
10073
10074 static int __devinit tg3_get_device_address(struct tg3 *tp)
10075 {
10076         struct net_device *dev = tp->dev;
10077         u32 hi, lo, mac_offset;
10078
10079 #ifdef CONFIG_SPARC64
10080         if (!tg3_get_macaddr_sparc(tp))
10081                 return 0;
10082 #endif
10083
10084         mac_offset = 0x7c;
10085         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10086              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10087             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10088                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10089                         mac_offset = 0xcc;
10090                 if (tg3_nvram_lock(tp))
10091                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10092                 else
10093                         tg3_nvram_unlock(tp);
10094         }
10095
10096         /* First try to get it from MAC address mailbox. */
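              /* The value 0x484b checked below is ASCII "HK", used here
               * as the signature that the mailbox holds a valid MAC
               * address.
               */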
10097         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10098         if ((hi >> 16) == 0x484b) {
10099                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10100                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10101
10102                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10103                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10104                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10105                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10106                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10107         }
10108         /* Next, try NVRAM. */
10109         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10110                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10111                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10112                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10113                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10114                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10115                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10116                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10117                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10118         }
10119         /* Finally just fetch it out of the MAC control regs. */
10120         else {
10121                 hi = tr32(MAC_ADDR_0_HIGH);
10122                 lo = tr32(MAC_ADDR_0_LOW);
10123
10124                 dev->dev_addr[5] = lo & 0xff;
10125                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10126                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10127                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10128                 dev->dev_addr[1] = hi & 0xff;
10129                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10130         }
10131
10132         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10133 #ifdef CONFIG_SPARC64
10134                 if (!tg3_get_default_macaddr_sparc(tp))
10135                         return 0;
10136 #endif
10137                 return -EINVAL;
10138         }
10139         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10140         return 0;
10141 }
10142
10143 #define BOUNDARY_SINGLE_CACHELINE       1
10144 #define BOUNDARY_MULTI_CACHELINE        2
10145
10146 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10147 {
10148         int cacheline_size;
10149         u8 byte;
10150         int goal;
10151
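              /* PCI_CACHE_LINE_SIZE is specified in units of 32-bit
               * words; convert it to bytes below, treating 0 (not
               * configured) as 1024.
               */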
10152         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10153         if (byte == 0)
10154                 cacheline_size = 1024;
10155         else
10156                 cacheline_size = (int) byte * 4;
10157
10158         /* On 5703 and later chips, the boundary bits have no
10159          * effect.
10160          */
10161         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10162             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10163             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10164                 goto out;
10165
10166 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10167         goal = BOUNDARY_MULTI_CACHELINE;
10168 #else
10169 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10170         goal = BOUNDARY_SINGLE_CACHELINE;
10171 #else
10172         goal = 0;
10173 #endif
10174 #endif
10175
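              /* Architectures not listed above leave goal at 0, so the
               * boundary bits in val are returned unchanged.
               */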
10176         if (!goal)
10177                 goto out;
10178
10179         /* PCI controllers on most RISC systems tend to disconnect
10180          * when a device tries to burst across a cache-line boundary.
10181          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10182          *
10183          * Unfortunately, for PCI-E there are only limited
10184          * write-side controls for this, and thus for reads
10185          * we will still get the disconnects.  We'll also waste
10186          * these PCI cycles for both read and write for chips
10187          * other than 5700 and 5701 which do not implement the
10188          * boundary bits.
10189          */
10190         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10191             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10192                 switch (cacheline_size) {
10193                 case 16:
10194                 case 32:
10195                 case 64:
10196                 case 128:
10197                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10198                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10199                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10200                         } else {
10201                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10202                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10203                         }
10204                         break;
10205
10206                 case 256:
10207                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10208                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10209                         break;
10210
10211                 default:
10212                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10213                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10214                         break;
10215                 };
10216         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10217                 switch (cacheline_size) {
10218                 case 16:
10219                 case 32:
10220                 case 64:
10221                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10222                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10223                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10224                                 break;
10225                         }
10226                         /* fallthrough */
10227                 case 128:
10228                 default:
10229                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10230                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10231                         break;
10232                 };
10233         } else {
10234                 switch (cacheline_size) {
10235                 case 16:
10236                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10237                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10238                                         DMA_RWCTRL_WRITE_BNDRY_16);
10239                                 break;
10240                         }
10241                         /* fallthrough */
10242                 case 32:
10243                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10244                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10245                                         DMA_RWCTRL_WRITE_BNDRY_32);
10246                                 break;
10247                         }
10248                         /* fallthrough */
10249                 case 64:
10250                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10251                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10252                                         DMA_RWCTRL_WRITE_BNDRY_64);
10253                                 break;
10254                         }
10255                         /* fallthrough */
10256                 case 128:
10257                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10258                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10259                                         DMA_RWCTRL_WRITE_BNDRY_128);
10260                                 break;
10261                         }
10262                         /* fallthrough */
10263                 case 256:
10264                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10265                                 DMA_RWCTRL_WRITE_BNDRY_256);
10266                         break;
10267                 case 512:
10268                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10269                                 DMA_RWCTRL_WRITE_BNDRY_512);
10270                         break;
10271                 case 1024:
10272                 default:
10273                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10274                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10275                         break;
10276                 };
10277         }
10278
10279 out:
10280         return val;
10281 }
10282
10283 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10284 {
10285         struct tg3_internal_buffer_desc test_desc;
10286         u32 sram_dma_descs;
10287         int i, ret;
10288
10289         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10290
10291         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10292         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10293         tw32(RDMAC_STATUS, 0);
10294         tw32(WDMAC_STATUS, 0);
10295
10296         tw32(BUFMGR_MODE, 0);
10297         tw32(FTQ_RESET, 0);
10298
10299         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10300         test_desc.addr_lo = buf_dma & 0xffffffff;
10301         test_desc.nic_mbuf = 0x00002100;
10302         test_desc.len = size;
10303
10304         /*
10305          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10306          * the *second* time the tg3 driver was getting loaded after an
10307          * initial scan.
10308          *
10309          * Broadcom tells me:
10310          *   ...the DMA engine is connected to the GRC block and a DMA
10311          *   reset may affect the GRC block in some unpredictable way...
10312          *   The behavior of resets to individual blocks has not been tested.
10313          *
10314          * Broadcom noted the GRC reset will also reset all sub-components.
10315          */
10316         if (to_device) {
10317                 test_desc.cqid_sqid = (13 << 8) | 2;
10318
10319                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10320                 udelay(40);
10321         } else {
10322                 test_desc.cqid_sqid = (16 << 8) | 7;
10323
10324                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10325                 udelay(40);
10326         }
10327         test_desc.flags = 0x00000005;
10328
10329         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10330                 u32 val;
10331
10332                 val = *(((u32 *)&test_desc) + i);
10333                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10334                                        sram_dma_descs + (i * sizeof(u32)));
10335                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10336         }
10337         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
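              /* The loop above copied the test descriptor into NIC SRAM
               * one u32 at a time through the PCI memory-window config
               * registers; the window base is reset to 0 before the DMA
               * is kicked off.
               */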
10338
10339         if (to_device) {
10340                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10341         } else {
10342                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10343         }
10344
10345         ret = -ENODEV;
10346         for (i = 0; i < 40; i++) {
10347                 u32 val;
10348
10349                 if (to_device)
10350                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10351                 else
10352                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10353                 if ((val & 0xffff) == sram_dma_descs) {
10354                         ret = 0;
10355                         break;
10356                 }
10357
10358                 udelay(100);
10359         }
10360
10361         return ret;
10362 }
10363
10364 #define TEST_BUFFER_SIZE        0x2000
10365
10366 static int __devinit tg3_test_dma(struct tg3 *tp)
10367 {
10368         dma_addr_t buf_dma;
10369         u32 *buf, saved_dma_rwctrl;
10370         int ret;
10371
10372         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10373         if (!buf) {
10374                 ret = -ENOMEM;
10375                 goto out_nofree;
10376         }
10377
10378         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10379                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10380
10381         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10382
10383         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10384                 /* DMA read watermark not used on PCIE */
10385                 tp->dma_rwctrl |= 0x00180000;
10386         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10387                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10388                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10389                         tp->dma_rwctrl |= 0x003f0000;
10390                 else
10391                         tp->dma_rwctrl |= 0x003f000f;
10392         } else {
10393                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10394                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10395                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10396
10397                         /* If the 5704 is behind the EPB bridge, we can
10398                          * do the less restrictive ONE_DMA workaround for
10399                          * better performance.
10400                          */
10401                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10402                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10403                                 tp->dma_rwctrl |= 0x8000;
10404                         else if (ccval == 0x6 || ccval == 0x7)
10405                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10406
10407                         /* Set bit 23 to enable PCIX hw bug fix */
10408                         tp->dma_rwctrl |= 0x009f0000;
10409                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10410                         /* 5780 always in PCIX mode */
10411                         tp->dma_rwctrl |= 0x00144000;
10412                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10413                         /* 5714 always in PCIX mode */
10414                         tp->dma_rwctrl |= 0x00148000;
10415                 } else {
10416                         tp->dma_rwctrl |= 0x001b000f;
10417                 }
10418         }
10419
10420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10422                 tp->dma_rwctrl &= 0xfffffff0;
10423
10424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10425             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10426                 /* Remove this if it causes problems for some boards. */
10427                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10428
10429                 /* On 5700/5701 chips, we need to set this bit.
10430                  * Otherwise the chip will issue cacheline transactions
10431                  * to streamable DMA memory with not all the byte
10432                  * enables turned on.  This is an error on several
10433                  * RISC PCI controllers, in particular sparc64.
10434                  *
10435                  * On 5703/5704 chips, this bit has been reassigned
10436                  * a different meaning.  In particular, it is used
10437                  * on those chips to enable a PCI-X workaround.
10438                  */
10439                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10440         }
10441
10442         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10443
10444 #if 0
10445         /* Unneeded, already done by tg3_get_invariants.  */
10446         tg3_switch_clocks(tp);
10447 #endif
10448
10449         ret = 0;
10450         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10451             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10452                 goto out;
10453
10454         /* It is best to perform DMA test with maximum write burst size
10455          * to expose the 5700/5701 write DMA bug.
10456          */
10457         saved_dma_rwctrl = tp->dma_rwctrl;
10458         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10459         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10460
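        /* Fill the buffer with a known pattern, DMA it to the chip, DMA it
         * back, and verify the contents.  On corruption the loop retries
         * once with the conservative 16-byte write boundary.
         */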
10461         while (1) {
10462                 u32 *p = buf, i;
10463
10464                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10465                         p[i] = i;
10466
10467                 /* Send the buffer to the chip. */
10468                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10469                 if (ret) {
10470                         printk(KERN_ERR "tg3_test_dma() write of test buffer failed %d\n", ret);
10471                         break;
10472                 }
10473
10474 #if 0
10475                 /* validate data reached card RAM correctly. */
10476                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10477                         u32 val;
10478                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10479                         if (le32_to_cpu(val) != p[i]) {
10480                                 printk(KERN_ERR "tg3_test_dma() card buffer corrupted on write! (%d != %d)\n", val, i);
10481                                 /* ret = -ENODEV here? */
10482                         }
10483                         p[i] = 0;
10484                 }
10485 #endif
10486                 /* Now read it back. */
10487                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10488                 if (ret) {
10489                         printk(KERN_ERR "tg3_test_dma() read of test buffer failed %d\n", ret);
10490
10491                         break;
10492                 }
10493
10494                 /* Verify it. */
10495                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10496                         if (p[i] == i)
10497                                 continue;
10498
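                        /* On a mismatch, fall back to the 16-byte write
                         * boundary (if not already in use) and rerun the
                         * whole test before giving up.
                         */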
10499                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10500                             DMA_RWCTRL_WRITE_BNDRY_16) {
10501                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10502                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10503                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10504                                 break;
10505                         } else {
10506                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10507                                 ret = -ENODEV;
10508                                 goto out;
10509                         }
10510                 }
10511
10512                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10513                         /* Success. */
10514                         ret = 0;
10515                         break;
10516                 }
10517         }
10518         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10519             DMA_RWCTRL_WRITE_BNDRY_16) {
10520                 static struct pci_device_id dma_wait_state_chipsets[] = {
10521                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10522                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10523                         { },
10524                 };
10525
10526                 /* DMA test passed without adjusting DMA boundary,
10527                  * now look for chipsets that are known to expose the
10528                  * DMA bug without failing the test.
10529                  */
10530                 if (pci_dev_present(dma_wait_state_chipsets)) {
10531                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10532                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10533                 } else {
10534                         /* Safe to use the calculated DMA boundary. */
10535                         tp->dma_rwctrl = saved_dma_rwctrl;
10536                 }
10537
10538                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10539         }
10540
10541 out:
10542         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10543 out_nofree:
10544         return ret;
10545 }
10546
10547 static void __devinit tg3_init_link_config(struct tg3 *tp)
10548 {
10549         tp->link_config.advertising =
10550                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10551                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10552                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10553                  ADVERTISED_Autoneg | ADVERTISED_MII);
10554         tp->link_config.speed = SPEED_INVALID;
10555         tp->link_config.duplex = DUPLEX_INVALID;
10556         tp->link_config.autoneg = AUTONEG_ENABLE;
10557         netif_carrier_off(tp->dev);
10558         tp->link_config.active_speed = SPEED_INVALID;
10559         tp->link_config.active_duplex = DUPLEX_INVALID;
10560         tp->link_config.phy_is_low_power = 0;
10561         tp->link_config.orig_speed = SPEED_INVALID;
10562         tp->link_config.orig_duplex = DUPLEX_INVALID;
10563         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10564 }
10565
10566 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10567 {
10568         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10569                 tp->bufmgr_config.mbuf_read_dma_low_water =
10570                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10571                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10572                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10573                 tp->bufmgr_config.mbuf_high_water =
10574                         DEFAULT_MB_HIGH_WATER_5705;
10575
10576                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10577                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10578                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10579                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10580                 tp->bufmgr_config.mbuf_high_water_jumbo =
10581                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10582         } else {
10583                 tp->bufmgr_config.mbuf_read_dma_low_water =
10584                         DEFAULT_MB_RDMA_LOW_WATER;
10585                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10586                         DEFAULT_MB_MACRX_LOW_WATER;
10587                 tp->bufmgr_config.mbuf_high_water =
10588                         DEFAULT_MB_HIGH_WATER;
10589
10590                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10591                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10592                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10593                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10594                 tp->bufmgr_config.mbuf_high_water_jumbo =
10595                         DEFAULT_MB_HIGH_WATER_JUMBO;
10596         }
10597
10598         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10599         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10600 }
10601
10602 static char * __devinit tg3_phy_string(struct tg3 *tp)
10603 {
10604         switch (tp->phy_id & PHY_ID_MASK) {
10605         case PHY_ID_BCM5400:    return "5400";
10606         case PHY_ID_BCM5401:    return "5401";
10607         case PHY_ID_BCM5411:    return "5411";
10608         case PHY_ID_BCM5701:    return "5701";
10609         case PHY_ID_BCM5703:    return "5703";
10610         case PHY_ID_BCM5704:    return "5704";
10611         case PHY_ID_BCM5705:    return "5705";
10612         case PHY_ID_BCM5750:    return "5750";
10613         case PHY_ID_BCM5752:    return "5752";
10614         case PHY_ID_BCM5714:    return "5714";
10615         case PHY_ID_BCM5780:    return "5780";
10616         case PHY_ID_BCM8002:    return "8002/serdes";
10617         case 0:                 return "serdes";
10618         default:                return "unknown";
10619         }
10620 }
10621
10622 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10623 {
10624         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10625                 strcpy(str, "PCI Express");
10626                 return str;
10627         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10628                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10629
10630                 strcpy(str, "PCIX:");
10631
10632                 if ((clock_ctrl == 7) ||
10633                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10634                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10635                         strcat(str, "133MHz");
10636                 else if (clock_ctrl == 0)
10637                         strcat(str, "33MHz");
10638                 else if (clock_ctrl == 2)
10639                         strcat(str, "50MHz");
10640                 else if (clock_ctrl == 4)
10641                         strcat(str, "66MHz");
10642                 else if (clock_ctrl == 6)
10643                         strcat(str, "100MHz");
10644         } else {
10645                 strcpy(str, "PCI:");
10646                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10647                         strcat(str, "66MHz");
10648                 else
10649                         strcat(str, "33MHz");
10650         }
10651         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10652                 strcat(str, ":32-bit");
10653         else
10654                 strcat(str, ":64-bit");
10655         return str;
10656 }
10657
10658 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10659 {
10660         struct pci_dev *peer;
10661         unsigned int func, devnr = tp->pdev->devfn & ~7;
10662
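        /* Scan all eight PCI functions in this slot for the other port of a
         * dual-port device (e.g. 5704); devnr is function 0 of the slot.
         */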
10663         for (func = 0; func < 8; func++) {
10664                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10665                 if (peer && peer != tp->pdev)
10666                         break;
10667                 pci_dev_put(peer);
10668         }
10669         /* 5704 can be configured in single-port mode, set peer to
10670          * tp->pdev in that case.
10671          */
10672         if (!peer) {
10673                 peer = tp->pdev;
10674                 return peer;
10675         }
10676
10677         /*
10678          * We don't need to keep the refcount elevated; there's no way
10679          * to remove one half of this device without removing the other.
10680          */
10681         pci_dev_put(peer);
10682
10683         return peer;
10684 }
10685
10686 static void __devinit tg3_init_coal(struct tg3 *tp)
10687 {
10688         struct ethtool_coalesce *ec = &tp->coal;
10689
10690         memset(ec, 0, sizeof(*ec));
10691         ec->cmd = ETHTOOL_GCOALESCE;
10692         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10693         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10694         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10695         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10696         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10697         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10698         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10699         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10700         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10701
10702         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10703                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10704                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10705                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10706                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10707                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10708         }
10709
10710         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10711                 ec->rx_coalesce_usecs_irq = 0;
10712                 ec->tx_coalesce_usecs_irq = 0;
10713                 ec->stats_block_coalesce_usecs = 0;
10714         }
10715 }
10716
10717 static int __devinit tg3_init_one(struct pci_dev *pdev,
10718                                   const struct pci_device_id *ent)
10719 {
10720         static int tg3_version_printed = 0;
10721         unsigned long tg3reg_base, tg3reg_len;
10722         struct net_device *dev;
10723         struct tg3 *tp;
10724         int i, err, pm_cap;
10725         char str[40];
10726         u64 dma_mask, persist_dma_mask;
10727
10728         if (tg3_version_printed++ == 0)
10729                 printk(KERN_INFO "%s", version);
10730
10731         err = pci_enable_device(pdev);
10732         if (err) {
10733                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10734                        "aborting.\n");
10735                 return err;
10736         }
10737
10738         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10739                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10740                        "base address, aborting.\n");
10741                 err = -ENODEV;
10742                 goto err_out_disable_pdev;
10743         }
10744
10745         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10746         if (err) {
10747                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10748                        "aborting.\n");
10749                 goto err_out_disable_pdev;
10750         }
10751
10752         pci_set_master(pdev);
10753
10754         /* Find power-management capability. */
10755         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10756         if (pm_cap == 0) {
10757                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10758                        "aborting.\n");
10759                 err = -EIO;
10760                 goto err_out_free_res;
10761         }
10762
10763         tg3reg_base = pci_resource_start(pdev, 0);
10764         tg3reg_len = pci_resource_len(pdev, 0);
10765
10766         dev = alloc_etherdev(sizeof(*tp));
10767         if (!dev) {
10768                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10769                 err = -ENOMEM;
10770                 goto err_out_free_res;
10771         }
10772
10773         SET_MODULE_OWNER(dev);
10774         SET_NETDEV_DEV(dev, &pdev->dev);
10775
10776         dev->features |= NETIF_F_LLTX;
10777 #if TG3_VLAN_TAG_USED
10778         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10779         dev->vlan_rx_register = tg3_vlan_rx_register;
10780         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10781 #endif
10782
10783         tp = netdev_priv(dev);
10784         tp->pdev = pdev;
10785         tp->dev = dev;
10786         tp->pm_cap = pm_cap;
10787         tp->mac_mode = TG3_DEF_MAC_MODE;
10788         tp->rx_mode = TG3_DEF_RX_MODE;
10789         tp->tx_mode = TG3_DEF_TX_MODE;
10790         tp->mi_mode = MAC_MI_MODE_BASE;
10791         if (tg3_debug > 0)
10792                 tp->msg_enable = tg3_debug;
10793         else
10794                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10795
10796         /* The word/byte swap controls here control register access byte
10797          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10798          * setting below.
10799          */
10800         tp->misc_host_ctrl =
10801                 MISC_HOST_CTRL_MASK_PCI_INT |
10802                 MISC_HOST_CTRL_WORD_SWAP |
10803                 MISC_HOST_CTRL_INDIR_ACCESS |
10804                 MISC_HOST_CTRL_PCISTATE_RW;
10805
10806         /* The NONFRM (non-frame) byte/word swap controls take effect
10807          * on descriptor entries, anything which isn't packet data.
10808          *
10809          * The StrongARM chips on the board (one for tx, one for rx)
10810          * are running in big-endian mode.
10811          */
10812         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10813                         GRC_MODE_WSWAP_NONFRM_DATA);
10814 #ifdef __BIG_ENDIAN
10815         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10816 #endif
10817         spin_lock_init(&tp->lock);
10818         spin_lock_init(&tp->tx_lock);
10819         spin_lock_init(&tp->indirect_lock);
10820         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10821
10822         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10823         if (!tp->regs) {
10824                 printk(KERN_ERR PFX "Cannot map device registers, "
10825                        "aborting.\n");
10826                 err = -ENOMEM;
10827                 goto err_out_free_dev;
10828         }
10829
10830         tg3_init_link_config(tp);
10831
10832         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10833         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10834         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10835
10836         dev->open = tg3_open;
10837         dev->stop = tg3_close;
10838         dev->get_stats = tg3_get_stats;
10839         dev->set_multicast_list = tg3_set_rx_mode;
10840         dev->set_mac_address = tg3_set_mac_addr;
10841         dev->do_ioctl = tg3_ioctl;
10842         dev->tx_timeout = tg3_tx_timeout;
10843         dev->poll = tg3_poll;
10844         dev->ethtool_ops = &tg3_ethtool_ops;
10845         dev->weight = 64;
10846         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10847         dev->change_mtu = tg3_change_mtu;
10848         dev->irq = pdev->irq;
10849 #ifdef CONFIG_NET_POLL_CONTROLLER
10850         dev->poll_controller = tg3_poll_controller;
10851 #endif
10852
10853         err = tg3_get_invariants(tp);
10854         if (err) {
10855                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10856                        "aborting.\n");
10857                 goto err_out_iounmap;
10858         }
10859
10860         /* The EPB bridge inside 5714, 5715, and 5780 and any
10861          * device behind the EPB cannot support DMA addresses > 40-bit.
10862          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
10863          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
10864          * do DMA address check in tg3_start_xmit().
10865          */
10866         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10867                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
10868         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
10869                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
10870 #ifdef CONFIG_HIGHMEM
10871                 dma_mask = DMA_64BIT_MASK;
10872 #endif
10873         } else
10874                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
10875
10876         /* Configure DMA attributes. */
10877         if (dma_mask > DMA_32BIT_MASK) {
10878                 err = pci_set_dma_mask(pdev, dma_mask);
10879                 if (!err) {
10880                         dev->features |= NETIF_F_HIGHDMA;
10881                         err = pci_set_consistent_dma_mask(pdev,
10882                                                           persist_dma_mask);
10883                         if (err < 0) {
10884                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
10885                                        "DMA for consistent allocations\n");
10886                                 goto err_out_iounmap;
10887                         }
10888                 }
10889         }
10890         if (err || dma_mask == DMA_32BIT_MASK) {
10891                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10892                 if (err) {
10893                         printk(KERN_ERR PFX "No usable DMA configuration, "
10894                                "aborting.\n");
10895                         goto err_out_iounmap;
10896                 }
10897         }
10898
10899         tg3_init_bufmgr_config(tp);
10900
10901 #if TG3_TSO_SUPPORT != 0
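        /* Decide TSO capability: chips with hardware TSO are always capable;
         * 5700/5701, 5705 A0 and ASF-enabled configurations cannot use
         * firmware TSO; everything else can.
         */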
10902         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10903                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10904         }
10905         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10907             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10908             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10909                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10910         } else {
10911                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10912         }
10913
10914         /* TSO is on by default on chips that support hardware TSO.
10915          * Firmware TSO on older chips gives lower performance, so it
10916          * is off by default, but can be enabled using ethtool.
10917          */
10918         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
10919                 dev->features |= NETIF_F_TSO;
10920
10921 #endif
10922
10923         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10924             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10925             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10926                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10927                 tp->rx_pending = 63;
10928         }
10929
10930         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10931             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10932                 tp->pdev_peer = tg3_find_peer(tp);
10933
10934         err = tg3_get_device_address(tp);
10935         if (err) {
10936                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10937                        "aborting.\n");
10938                 goto err_out_iounmap;
10939         }
10940
10941         /*
10942          * Reset the chip in case a UNDI or EFI driver did not shut it
10943          * down cleanly.  The DMA self test will enable WDMAC and we
10944          * would then see (spurious) pending DMA on the PCI bus.
10945          */
10946         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10947             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10948                 pci_save_state(tp->pdev);
10949                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10950                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10951         }
10952
10953         err = tg3_test_dma(tp);
10954         if (err) {
10955                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10956                 goto err_out_iounmap;
10957         }
10958
10959         /* Tigon3 can only checksum IPv4 packets... and some chips have
10960          * buggy checksumming.
10961          */
10962         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10963                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10964                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10965         } else
10966                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10967
10968         /* flow control autonegotiation is default behavior */
10969         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10970
10971         tg3_init_coal(tp);
10972
10973         /* Now that we have fully setup the chip, save away a snapshot
10974          * of the PCI config space.  We need to restore this after
10975          * GRC_MISC_CFG core clock resets and some resume events.
10976          */
10977         pci_save_state(tp->pdev);
10978
10979         err = register_netdev(dev);
10980         if (err) {
10981                 printk(KERN_ERR PFX "Cannot register net device, "
10982                        "aborting.\n");
10983                 goto err_out_iounmap;
10984         }
10985
10986         pci_set_drvdata(pdev, dev);
10987
10988         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10989                dev->name,
10990                tp->board_part_number,
10991                tp->pci_chip_rev_id,
10992                tg3_phy_string(tp),
10993                tg3_bus_string(tp, str),
10994                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10995
10996         for (i = 0; i < 6; i++)
10997                 printk("%2.2x%c", dev->dev_addr[i],
10998                        i == 5 ? '\n' : ':');
10999
11000         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11001                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11002                "TSOcap[%d]\n",
11003                dev->name,
11004                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11005                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11006                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11007                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11008                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11009                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11010                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11011         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11012                dev->name, tp->dma_rwctrl,
11013                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11014                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11015
11016         return 0;
11017
11018 err_out_iounmap:
11019         if (tp->regs) {
11020                 iounmap(tp->regs);
11021                 tp->regs = NULL;
11022         }
11023
11024 err_out_free_dev:
11025         free_netdev(dev);
11026
11027 err_out_free_res:
11028         pci_release_regions(pdev);
11029
11030 err_out_disable_pdev:
11031         pci_disable_device(pdev);
11032         pci_set_drvdata(pdev, NULL);
11033         return err;
11034 }
11035
11036 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11037 {
11038         struct net_device *dev = pci_get_drvdata(pdev);
11039
11040         if (dev) {
11041                 struct tg3 *tp = netdev_priv(dev);
11042
11043                 flush_scheduled_work();
11044                 unregister_netdev(dev);
11045                 if (tp->regs) {
11046                         iounmap(tp->regs);
11047                         tp->regs = NULL;
11048                 }
11049                 free_netdev(dev);
11050                 pci_release_regions(pdev);
11051                 pci_disable_device(pdev);
11052                 pci_set_drvdata(pdev, NULL);
11053         }
11054 }
11055
11056 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11057 {
11058         struct net_device *dev = pci_get_drvdata(pdev);
11059         struct tg3 *tp = netdev_priv(dev);
11060         int err;
11061
11062         if (!netif_running(dev))
11063                 return 0;
11064
11065         flush_scheduled_work();
11066         tg3_netif_stop(tp);
11067
11068         del_timer_sync(&tp->timer);
11069
11070         tg3_full_lock(tp, 1);
11071         tg3_disable_ints(tp);
11072         tg3_full_unlock(tp);
11073
11074         netif_device_detach(dev);
11075
11076         tg3_full_lock(tp, 0);
11077         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11078         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11079         tg3_full_unlock(tp);
11080
11081         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11082         if (err) {
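                /* Entering the low-power state failed: re-init the hardware,
                 * restart the timer and re-attach the interface so the
                 * device keeps working.
                 */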
11083                 tg3_full_lock(tp, 0);
11084
11085                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11086                 tg3_init_hw(tp);
11087
11088                 tp->timer.expires = jiffies + tp->timer_offset;
11089                 add_timer(&tp->timer);
11090
11091                 netif_device_attach(dev);
11092                 tg3_netif_start(tp);
11093
11094                 tg3_full_unlock(tp);
11095         }
11096
11097         return err;
11098 }
11099
11100 static int tg3_resume(struct pci_dev *pdev)
11101 {
11102         struct net_device *dev = pci_get_drvdata(pdev);
11103         struct tg3 *tp = netdev_priv(dev);
11104         int err;
11105
11106         if (!netif_running(dev))
11107                 return 0;
11108
11109         pci_restore_state(tp->pdev);
11110
11111         err = tg3_set_power_state(tp, PCI_D0);
11112         if (err)
11113                 return err;
11114
11115         netif_device_attach(dev);
11116
11117         tg3_full_lock(tp, 0);
11118
11119         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11120         tg3_init_hw(tp);
11121
11122         tp->timer.expires = jiffies + tp->timer_offset;
11123         add_timer(&tp->timer);
11124
11125         tg3_netif_start(tp);
11126
11127         tg3_full_unlock(tp);
11128
11129         return 0;
11130 }
11131
11132 static struct pci_driver tg3_driver = {
11133         .name           = DRV_MODULE_NAME,
11134         .id_table       = tg3_pci_tbl,
11135         .probe          = tg3_init_one,
11136         .remove         = __devexit_p(tg3_remove_one),
11137         .suspend        = tg3_suspend,
11138         .resume         = tg3_resume
11139 };
11140
11141 static int __init tg3_init(void)
11142 {
11143         return pci_module_init(&tg3_driver);
11144 }
11145
11146 static void __exit tg3_cleanup(void)
11147 {
11148         pci_unregister_driver(&tg3_driver);
11149 }
11150
11151 module_init(tg3_init);
11152 module_exit(tg3_cleanup);