/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.43"
#define DRV_MODULE_RELDATE      "Oct 24, 2005"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)                                              \
        ((TP)->tx_pending -                                             \
         (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
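/* TG3_TX_RING_SIZE is a power of two, so the "& (TG3_TX_RING_SIZE - 1)"
 * masking above is the cheap equivalent of a modulo: with a 512 entry
 * ring, NEXT_TX(511) wraps back to 0, and the subtraction in
 * TX_BUFFS_AVAIL() stays correct even after tx_prod wraps past tx_cons.
 */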

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

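/* Indirect register access: the target offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is then
 * moved through TG3PCI_REG_DATA.  indirect_lock serializes the two
 * config cycles so concurrent callers cannot interleave them.
 */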
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
            !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32(tp, off);    /* flush */
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

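/* TX mailbox write: chips with the TXD_MBOX_HWBUG need the doorbell
 * value written twice, and on hosts that may reorder MMIO writes the
 * mailbox is read back so the write reaches the chip before we continue.
 */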
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
#define tr32(reg)               tp->read32(tp, reg)

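/* NIC SRAM access goes through a window in PCI config space:
 * TG3PCI_MEM_WIN_BASE_ADDR selects the SRAM address and
 * TG3PCI_MEM_WIN_DATA carries the value; the window base is put back
 * to zero afterwards.
 */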
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
{
        /* If no workaround is needed, write to mem space directly */
        if (tp->write32 != tg3_write_indirect_reg32)
                tw32(NIC_SRAM_WIN_BASE + off, val);
        else
                tg3_write_mem(tp, off, val);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

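/* Interrupts are masked in two places: the PCI-level mask bit in
 * MISC_HOST_CTRL and a write of 1 to interrupt mailbox 0.
 * tg3_enable_ints() reverses both and writes back the last processed
 * status block tag.
 */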
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

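/* Select the core clock settings for the chip family, stepping through
 * the ALTCLK intermediate values with a 40 usec delay after each write.
 * 5780-class chips are left untouched here.
 */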
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_f(TG3PCI_CLOCK_CTRL,
                               clock_ctrl | CLOCK_CTRL_625_CORE);
                        udelay(40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl |
                     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
                udelay(40);
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl | (CLOCK_CTRL_ALTCLK));
                udelay(40);
        }
        tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
        udelay(40);
}

#define PHY_BUSY_LOOPS  5000

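/* MII access: a transaction is started by programming MAC_MI_COM with
 * the PHY address, register number and command, then polling the BUSY
 * bit for up to PHY_BUSY_LOOPS iterations.  Hardware auto-polling is
 * turned off for the duration and restored afterwards.
 */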
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

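/* Enable the PHY's Ethernet@WireSpeed feature by setting bits 15 and 4
 * of the auxiliary control register, unless the chip is flagged with
 * TG3_FLG2_NO_ETH_WIRE_SPEED.
 */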
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

/* Reset the Tigon3 PHY and then reapply the chip-specific PHY
 * workarounds and jumbo frame settings.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        tg3_phy_set_wirespeed(tp);
        return 0;
}

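/* Juggle the GPIO-driven auxiliary power control.  On 5704 the other
 * port of the device is consulted (via pdev_peer) so that one port
 * does not switch off power that its WOL-enabled sibling still needs.
 */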
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                tp_peer = pci_get_drvdata(tp->pdev_peer);
                if (!tp_peer)
                        BUG();
        }


        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                       grc_local_ctrl);
                                udelay(100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        }
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

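/* Put the device into the requested PCI power state (0 = D0 ... 3 = D3hot),
 * programming wake-on-LAN, clock and GPIO state as needed before the
 * PCI PM control register is finally written.
 */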
static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is not a LOM */
                if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                        udelay(100);
                }

                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                                         tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                        tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       tp->dev->name,
                       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
                       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

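/* Resolve 802.3x flow control from the local and link-partner pause
 * advertisements.  1000BASE-X pause bits are first mapped onto their
 * 1000BASE-T equivalents so one resolution path handles both, and the
 * resulting RX/TX pause settings are pushed into MAC_RX_MODE and
 * MAC_TX_MODE only when they change.
 */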
1314 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1315 {
1316         u32 new_tg3_flags = 0;
1317         u32 old_rx_mode = tp->rx_mode;
1318         u32 old_tx_mode = tp->tx_mode;
1319
1320         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1321
1322                 /* Convert 1000BaseX flow control bits to 1000BaseT
1323                  * bits before resolving flow control.
1324                  */
1325                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1326                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1327                                        ADVERTISE_PAUSE_ASYM);
1328                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1329
1330                         if (local_adv & ADVERTISE_1000XPAUSE)
1331                                 local_adv |= ADVERTISE_PAUSE_CAP;
1332                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1333                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1334                         if (remote_adv & LPA_1000XPAUSE)
1335                                 remote_adv |= LPA_PAUSE_CAP;
1336                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1337                                 remote_adv |= LPA_PAUSE_ASYM;
1338                 }
1339
1340                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1341                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1342                                 if (remote_adv & LPA_PAUSE_CAP)
1343                                         new_tg3_flags |=
1344                                                 (TG3_FLAG_RX_PAUSE |
1345                                                 TG3_FLAG_TX_PAUSE);
1346                                 else if (remote_adv & LPA_PAUSE_ASYM)
1347                                         new_tg3_flags |=
1348                                                 (TG3_FLAG_RX_PAUSE);
1349                         } else {
1350                                 if (remote_adv & LPA_PAUSE_CAP)
1351                                         new_tg3_flags |=
1352                                                 (TG3_FLAG_RX_PAUSE |
1353                                                 TG3_FLAG_TX_PAUSE);
1354                         }
1355                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1356                         if ((remote_adv & LPA_PAUSE_CAP) &&
1357                         (remote_adv & LPA_PAUSE_ASYM))
1358                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1359                 }
1360
1361                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1362                 tp->tg3_flags |= new_tg3_flags;
1363         } else {
1364                 new_tg3_flags = tp->tg3_flags;
1365         }
1366
1367         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1368                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1369         else
1370                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1371
1372         if (old_rx_mode != tp->rx_mode) {
1373                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1374         }
1375         
1376         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1377                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1378         else
1379                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1380
1381         if (old_tx_mode != tp->tx_mode) {
1382                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1383         }
1384 }
1385
1386 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1387 {
1388         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1389         case MII_TG3_AUX_STAT_10HALF:
1390                 *speed = SPEED_10;
1391                 *duplex = DUPLEX_HALF;
1392                 break;
1393
1394         case MII_TG3_AUX_STAT_10FULL:
1395                 *speed = SPEED_10;
1396                 *duplex = DUPLEX_FULL;
1397                 break;
1398
1399         case MII_TG3_AUX_STAT_100HALF:
1400                 *speed = SPEED_100;
1401                 *duplex = DUPLEX_HALF;
1402                 break;
1403
1404         case MII_TG3_AUX_STAT_100FULL:
1405                 *speed = SPEED_100;
1406                 *duplex = DUPLEX_FULL;
1407                 break;
1408
1409         case MII_TG3_AUX_STAT_1000HALF:
1410                 *speed = SPEED_1000;
1411                 *duplex = DUPLEX_HALF;
1412                 break;
1413
1414         case MII_TG3_AUX_STAT_1000FULL:
1415                 *speed = SPEED_1000;
1416                 *duplex = DUPLEX_FULL;
1417                 break;
1418
1419         default:
1420                 *speed = SPEED_INVALID;
1421                 *duplex = DUPLEX_INVALID;
1422                 break;
1423         }
1424 }
1425
1426 static void tg3_phy_copper_begin(struct tg3 *tp)
1427 {
1428         u32 new_adv;
1429         int i;
1430
1431         if (tp->link_config.phy_is_low_power) {
1432                 /* Entering low power mode.  Disable gigabit and
1433                  * 100baseT advertisements.
1434                  */
1435                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1436
1437                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1438                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1439                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1440                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1441
1442                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1443         } else if (tp->link_config.speed == SPEED_INVALID) {
1444                 tp->link_config.advertising =
1445                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1446                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1447                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1448                          ADVERTISED_Autoneg | ADVERTISED_MII);
1449
1450                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1451                         tp->link_config.advertising &=
1452                                 ~(ADVERTISED_1000baseT_Half |
1453                                   ADVERTISED_1000baseT_Full);
1454
1455                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1456                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1457                         new_adv |= ADVERTISE_10HALF;
1458                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1459                         new_adv |= ADVERTISE_10FULL;
1460                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1461                         new_adv |= ADVERTISE_100HALF;
1462                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1463                         new_adv |= ADVERTISE_100FULL;
1464                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1465
1466                 if (tp->link_config.advertising &
1467                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1468                         new_adv = 0;
1469                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1470                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1471                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1472                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1473                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1474                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1475                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1476                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1477                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1478                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1479                 } else {
1480                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1481                 }
1482         } else {
1483                 /* Asking for a specific link mode. */
1484                 if (tp->link_config.speed == SPEED_1000) {
1485                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1486                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1487
1488                         if (tp->link_config.duplex == DUPLEX_FULL)
1489                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1490                         else
1491                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1492                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1493                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1494                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1495                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1496                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1497                 } else {
1498                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1499
1500                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1501                         if (tp->link_config.speed == SPEED_100) {
1502                                 if (tp->link_config.duplex == DUPLEX_FULL)
1503                                         new_adv |= ADVERTISE_100FULL;
1504                                 else
1505                                         new_adv |= ADVERTISE_100HALF;
1506                         } else {
1507                                 if (tp->link_config.duplex == DUPLEX_FULL)
1508                                         new_adv |= ADVERTISE_10FULL;
1509                                 else
1510                                         new_adv |= ADVERTISE_10HALF;
1511                         }
1512                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1513                 }
1514         }
1515
1516         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1517             tp->link_config.speed != SPEED_INVALID) {
1518                 u32 bmcr, orig_bmcr;
1519
1520                 tp->link_config.active_speed = tp->link_config.speed;
1521                 tp->link_config.active_duplex = tp->link_config.duplex;
1522
1523                 bmcr = 0;
1524                 switch (tp->link_config.speed) {
1525                 default:
1526                 case SPEED_10:
1527                         break;
1528
1529                 case SPEED_100:
1530                         bmcr |= BMCR_SPEED100;
1531                         break;
1532
1533                 case SPEED_1000:
1534                         bmcr |= TG3_BMCR_SPEED1000;
1535                         break;
1536                 }
1537
1538                 if (tp->link_config.duplex == DUPLEX_FULL)
1539                         bmcr |= BMCR_FULLDPLX;
1540
1541                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1542                     (bmcr != orig_bmcr)) {
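                             /* Drop the link by putting the PHY into
                              * loopback, wait for link-down, then apply
                              * the forced speed/duplex settings.
                              */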
1543                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1544                         for (i = 0; i < 1500; i++) {
1545                                 u32 tmp;
1546
1547                                 udelay(10);
1548                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1549                                     tg3_readphy(tp, MII_BMSR, &tmp))
1550                                         continue;
1551                                 if (!(tmp & BMSR_LSTATUS)) {
1552                                         udelay(40);
1553                                         break;
1554                                 }
1555                         }
1556                         tg3_writephy(tp, MII_BMCR, bmcr);
1557                         udelay(40);
1558                 }
1559         } else {
1560                 tg3_writephy(tp, MII_BMCR,
1561                              BMCR_ANENABLE | BMCR_ANRESTART);
1562         }
1563 }
1564
1565 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1566 {
1567         int err;
1568
1569         /* Turn off tap power management. */
1570         /* Set Extended packet length bit */
1571         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1572
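             /* Each DSP register update below is an address write to
              * MII_TG3_DSP_ADDRESS followed by a data write to
              * MII_TG3_DSP_RW_PORT.
              */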
1573         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1574         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1575
1576         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1577         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1578
1579         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1580         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1581
1582         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1583         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1584
1585         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1586         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1587
1588         udelay(40);
1589
1590         return err;
1591 }
1592
1593 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1594 {
1595         u32 adv_reg, all_mask;
1596
1597         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1598                 return 0;
1599
1600         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1601                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1602         if ((adv_reg & all_mask) != all_mask)
1603                 return 0;
1604         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1605                 u32 tg3_ctrl;
1606
1607                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1608                         return 0;
1609
1610                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1611                             MII_TG3_CTRL_ADV_1000_FULL);
1612                 if ((tg3_ctrl & all_mask) != all_mask)
1613                         return 0;
1614         }
1615         return 1;
1616 }
1617
1618 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1619 {
1620         int current_link_up;
1621         u32 bmsr, dummy;
1622         u16 current_speed;
1623         u8 current_duplex;
1624         int i, err;
1625
1626         tw32(MAC_EVENT, 0);
1627
1628         tw32_f(MAC_STATUS,
1629              (MAC_STATUS_SYNC_CHANGED |
1630               MAC_STATUS_CFG_CHANGED |
1631               MAC_STATUS_MI_COMPLETION |
1632               MAC_STATUS_LNKSTATE_CHANGED));
1633         udelay(40);
1634
1635         tp->mi_mode = MAC_MI_MODE_BASE;
1636         tw32_f(MAC_MI_MODE, tp->mi_mode);
1637         udelay(80);
1638
1639         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1640
1641         /* Some third-party PHYs need to be reset on link going
1642          * down.
1643          */
1644         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1645              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1646              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1647             netif_carrier_ok(tp->dev)) {
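                     /* BMSR link status is latched low, so read it
                      * twice to get the current state.
                      */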
1648                 tg3_readphy(tp, MII_BMSR, &bmsr);
1649                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1650                     !(bmsr & BMSR_LSTATUS))
1651                         force_reset = 1;
1652         }
1653         if (force_reset)
1654                 tg3_phy_reset(tp);
1655
1656         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1657                 tg3_readphy(tp, MII_BMSR, &bmsr);
1658                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1659                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1660                         bmsr = 0;
1661
1662                 if (!(bmsr & BMSR_LSTATUS)) {
1663                         err = tg3_init_5401phy_dsp(tp);
1664                         if (err)
1665                                 return err;
1666
1667                         tg3_readphy(tp, MII_BMSR, &bmsr);
1668                         for (i = 0; i < 1000; i++) {
1669                                 udelay(10);
1670                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1671                                     (bmsr & BMSR_LSTATUS)) {
1672                                         udelay(40);
1673                                         break;
1674                                 }
1675                         }
1676
1677                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1678                             !(bmsr & BMSR_LSTATUS) &&
1679                             tp->link_config.active_speed == SPEED_1000) {
1680                                 err = tg3_phy_reset(tp);
1681                                 if (!err)
1682                                         err = tg3_init_5401phy_dsp(tp);
1683                                 if (err)
1684                                         return err;
1685                         }
1686                 }
1687         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1688                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1689                 /* 5701 {A0,B0} CRC bug workaround */
1690                 tg3_writephy(tp, 0x15, 0x0a75);
1691                 tg3_writephy(tp, 0x1c, 0x8c68);
1692                 tg3_writephy(tp, 0x1c, 0x8d68);
1693                 tg3_writephy(tp, 0x1c, 0x8c68);
1694         }
1695
1696         /* Clear pending interrupts... */
1697         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1698         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1699
1700         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1701                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1702         else
1703                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1704
1705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1706             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1707                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1708                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1709                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1710                 else
1711                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1712         }
1713
1714         current_link_up = 0;
1715         current_speed = SPEED_INVALID;
1716         current_duplex = DUPLEX_INVALID;
1717
1718         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1719                 u32 val;
1720
1721                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1722                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1723                 if (!(val & (1 << 10))) {
1724                         val |= (1 << 10);
1725                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1726                         goto relink;
1727                 }
1728         }
1729
1730         bmsr = 0;
1731         for (i = 0; i < 100; i++) {
1732                 tg3_readphy(tp, MII_BMSR, &bmsr);
1733                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1734                     (bmsr & BMSR_LSTATUS))
1735                         break;
1736                 udelay(40);
1737         }
1738
1739         if (bmsr & BMSR_LSTATUS) {
1740                 u32 aux_stat, bmcr;
1741
1742                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1743                 for (i = 0; i < 2000; i++) {
1744                         udelay(10);
1745                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1746                             aux_stat)
1747                                 break;
1748                 }
1749
1750                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1751                                              &current_speed,
1752                                              &current_duplex);
1753
1754                 bmcr = 0;
1755                 for (i = 0; i < 200; i++) {
1756                         tg3_readphy(tp, MII_BMCR, &bmcr);
1757                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1758                                 continue;
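                             /* Values of 0 and 0x7fff look like invalid
                              * reads; wait for a sane BMCR value.
                              */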
1759                         if (bmcr && bmcr != 0x7fff)
1760                                 break;
1761                         udelay(10);
1762                 }
1763
1764                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1765                         if (bmcr & BMCR_ANENABLE) {
1766                                 current_link_up = 1;
1767
1768                                 /* Force autoneg restart if we are exiting
1769                                  * low power mode.
1770                                  */
1771                                 if (!tg3_copper_is_advertising_all(tp))
1772                                         current_link_up = 0;
1773                         } else {
1774                                 current_link_up = 0;
1775                         }
1776                 } else {
1777                         if (!(bmcr & BMCR_ANENABLE) &&
1778                             tp->link_config.speed == current_speed &&
1779                             tp->link_config.duplex == current_duplex) {
1780                                 current_link_up = 1;
1781                         } else {
1782                                 current_link_up = 0;
1783                         }
1784                 }
1785
1786                 tp->link_config.active_speed = current_speed;
1787                 tp->link_config.active_duplex = current_duplex;
1788         }
1789
1790         if (current_link_up == 1 &&
1791             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1792             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1793                 u32 local_adv, remote_adv;
1794
1795                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1796                         local_adv = 0;
1797                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1798
1799                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1800                         remote_adv = 0;
1801
1802                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1803
1804                 /* If we are not advertising full pause capability,
1805                  * something is wrong.  Bring the link down and reconfigure.
1806                  */
1807                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1808                         current_link_up = 0;
1809                 } else {
1810                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1811                 }
1812         }
1813 relink:
1814         if (current_link_up == 0) {
1815                 u32 tmp;
1816
1817                 tg3_phy_copper_begin(tp);
1818
1819                 tg3_readphy(tp, MII_BMSR, &tmp);
1820                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1821                     (tmp & BMSR_LSTATUS))
1822                         current_link_up = 1;
1823         }
1824
1825         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1826         if (current_link_up == 1) {
1827                 if (tp->link_config.active_speed == SPEED_100 ||
1828                     tp->link_config.active_speed == SPEED_10)
1829                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1830                 else
1831                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1832         } else
1833                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1834
1835         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1836         if (tp->link_config.active_duplex == DUPLEX_HALF)
1837                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1838
1839         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1840         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1841                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1842                     (current_link_up == 1 &&
1843                      tp->link_config.active_speed == SPEED_10))
1844                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1845         } else {
1846                 if (current_link_up == 1)
1847                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1848         }
1849
1850         /* ??? Without this setting Netgear GA302T PHY does not
1851          * ??? send/receive packets...
1852          */
1853         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1854             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1855                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1856                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1857                 udelay(80);
1858         }
1859
1860         tw32_f(MAC_MODE, tp->mac_mode);
1861         udelay(40);
1862
1863         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1864                 /* Polled via timer. */
1865                 tw32_f(MAC_EVENT, 0);
1866         } else {
1867                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1868         }
1869         udelay(40);
1870
1871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1872             current_link_up == 1 &&
1873             tp->link_config.active_speed == SPEED_1000 &&
1874             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1875              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1876                 udelay(120);
1877                 tw32_f(MAC_STATUS,
1878                      (MAC_STATUS_SYNC_CHANGED |
1879                       MAC_STATUS_CFG_CHANGED));
1880                 udelay(40);
1881                 tg3_write_mem(tp,
1882                               NIC_SRAM_FIRMWARE_MBOX,
1883                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1884         }
1885
1886         if (current_link_up != netif_carrier_ok(tp->dev)) {
1887                 if (current_link_up)
1888                         netif_carrier_on(tp->dev);
1889                 else
1890                         netif_carrier_off(tp->dev);
1891                 tg3_link_report(tp);
1892         }
1893
1894         return 0;
1895 }
1896
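     /* State for the software 1000BASE-X (IEEE 802.3 clause 37)
      * autonegotiation machine, used when hardware autoneg on the
      * SERDES is not in use.
      */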
1897 struct tg3_fiber_aneginfo {
1898         int state;
1899 #define ANEG_STATE_UNKNOWN              0
1900 #define ANEG_STATE_AN_ENABLE            1
1901 #define ANEG_STATE_RESTART_INIT         2
1902 #define ANEG_STATE_RESTART              3
1903 #define ANEG_STATE_DISABLE_LINK_OK      4
1904 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1905 #define ANEG_STATE_ABILITY_DETECT       6
1906 #define ANEG_STATE_ACK_DETECT_INIT      7
1907 #define ANEG_STATE_ACK_DETECT           8
1908 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1909 #define ANEG_STATE_COMPLETE_ACK         10
1910 #define ANEG_STATE_IDLE_DETECT_INIT     11
1911 #define ANEG_STATE_IDLE_DETECT          12
1912 #define ANEG_STATE_LINK_OK              13
1913 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1914 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1915
1916         u32 flags;
1917 #define MR_AN_ENABLE            0x00000001
1918 #define MR_RESTART_AN           0x00000002
1919 #define MR_AN_COMPLETE          0x00000004
1920 #define MR_PAGE_RX              0x00000008
1921 #define MR_NP_LOADED            0x00000010
1922 #define MR_TOGGLE_TX            0x00000020
1923 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1924 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1925 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1926 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1927 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1928 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1929 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1930 #define MR_TOGGLE_RX            0x00002000
1931 #define MR_NP_RX                0x00004000
1932
1933 #define MR_LINK_OK              0x80000000
1934
1935         unsigned long link_time, cur_time;
1936
1937         u32 ability_match_cfg;
1938         int ability_match_count;
1939
1940         char ability_match, idle_match, ack_match;
1941
1942         u32 txconfig, rxconfig;
1943 #define ANEG_CFG_NP             0x00000080
1944 #define ANEG_CFG_ACK            0x00000040
1945 #define ANEG_CFG_RF2            0x00000020
1946 #define ANEG_CFG_RF1            0x00000010
1947 #define ANEG_CFG_PS2            0x00000001
1948 #define ANEG_CFG_PS1            0x00008000
1949 #define ANEG_CFG_HD             0x00004000
1950 #define ANEG_CFG_FD             0x00002000
1951 #define ANEG_CFG_INVAL          0x00001f06
1952
1953 };
1954 #define ANEG_OK         0
1955 #define ANEG_DONE       1
1956 #define ANEG_TIMER_ENAB 2
1957 #define ANEG_FAILED     -1
1958
1959 #define ANEG_STATE_SETTLE_TIME  10000
1960
1961 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1962                                    struct tg3_fiber_aneginfo *ap)
1963 {
1964         unsigned long delta;
1965         u32 rx_cfg_reg;
1966         int ret;
1967
1968         if (ap->state == ANEG_STATE_UNKNOWN) {
1969                 ap->rxconfig = 0;
1970                 ap->link_time = 0;
1971                 ap->cur_time = 0;
1972                 ap->ability_match_cfg = 0;
1973                 ap->ability_match_count = 0;
1974                 ap->ability_match = 0;
1975                 ap->idle_match = 0;
1976                 ap->ack_match = 0;
1977         }
1978         ap->cur_time++;
1979
1980         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1981                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1982
1983                 if (rx_cfg_reg != ap->ability_match_cfg) {
1984                         ap->ability_match_cfg = rx_cfg_reg;
1985                         ap->ability_match = 0;
1986                         ap->ability_match_count = 0;
1987                 } else {
1988                         if (++ap->ability_match_count > 1) {
1989                                 ap->ability_match = 1;
1990                                 ap->ability_match_cfg = rx_cfg_reg;
1991                         }
1992                 }
1993                 if (rx_cfg_reg & ANEG_CFG_ACK)
1994                         ap->ack_match = 1;
1995                 else
1996                         ap->ack_match = 0;
1997
1998                 ap->idle_match = 0;
1999         } else {
2000                 ap->idle_match = 1;
2001                 ap->ability_match_cfg = 0;
2002                 ap->ability_match_count = 0;
2003                 ap->ability_match = 0;
2004                 ap->ack_match = 0;
2005
2006                 rx_cfg_reg = 0;
2007         }
2008
2009         ap->rxconfig = rx_cfg_reg;
2010         ret = ANEG_OK;
2011
2012         switch (ap->state) {
2013         case ANEG_STATE_UNKNOWN:
2014                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2015                         ap->state = ANEG_STATE_AN_ENABLE;
2016
2017                 /* fallthru */
2018         case ANEG_STATE_AN_ENABLE:
2019                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2020                 if (ap->flags & MR_AN_ENABLE) {
2021                         ap->link_time = 0;
2022                         ap->cur_time = 0;
2023                         ap->ability_match_cfg = 0;
2024                         ap->ability_match_count = 0;
2025                         ap->ability_match = 0;
2026                         ap->idle_match = 0;
2027                         ap->ack_match = 0;
2028
2029                         ap->state = ANEG_STATE_RESTART_INIT;
2030                 } else {
2031                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2032                 }
2033                 break;
2034
2035         case ANEG_STATE_RESTART_INIT:
2036                 ap->link_time = ap->cur_time;
2037                 ap->flags &= ~(MR_NP_LOADED);
2038                 ap->txconfig = 0;
2039                 tw32(MAC_TX_AUTO_NEG, 0);
2040                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2041                 tw32_f(MAC_MODE, tp->mac_mode);
2042                 udelay(40);
2043
2044                 ret = ANEG_TIMER_ENAB;
2045                 ap->state = ANEG_STATE_RESTART;
2046
2047                 /* fallthru */
2048         case ANEG_STATE_RESTART:
2049                 delta = ap->cur_time - ap->link_time;
2050                 if (delta > ANEG_STATE_SETTLE_TIME) {
2051                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2052                 } else {
2053                         ret = ANEG_TIMER_ENAB;
2054                 }
2055                 break;
2056
2057         case ANEG_STATE_DISABLE_LINK_OK:
2058                 ret = ANEG_DONE;
2059                 break;
2060
2061         case ANEG_STATE_ABILITY_DETECT_INIT:
2062                 ap->flags &= ~(MR_TOGGLE_TX);
2063                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2064                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2065                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2066                 tw32_f(MAC_MODE, tp->mac_mode);
2067                 udelay(40);
2068
2069                 ap->state = ANEG_STATE_ABILITY_DETECT;
2070                 break;
2071
2072         case ANEG_STATE_ABILITY_DETECT:
2073                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2074                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2075                 }
2076                 break;
2077
2078         case ANEG_STATE_ACK_DETECT_INIT:
2079                 ap->txconfig |= ANEG_CFG_ACK;
2080                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2081                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2082                 tw32_f(MAC_MODE, tp->mac_mode);
2083                 udelay(40);
2084
2085                 ap->state = ANEG_STATE_ACK_DETECT;
2086
2087                 /* fallthru */
2088         case ANEG_STATE_ACK_DETECT:
2089                 if (ap->ack_match != 0) {
2090                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2091                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2092                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2093                         } else {
2094                                 ap->state = ANEG_STATE_AN_ENABLE;
2095                         }
2096                 } else if (ap->ability_match != 0 &&
2097                            ap->rxconfig == 0) {
2098                         ap->state = ANEG_STATE_AN_ENABLE;
2099                 }
2100                 break;
2101
2102         case ANEG_STATE_COMPLETE_ACK_INIT:
2103                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2104                         ret = ANEG_FAILED;
2105                         break;
2106                 }
2107                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2108                                MR_LP_ADV_HALF_DUPLEX |
2109                                MR_LP_ADV_SYM_PAUSE |
2110                                MR_LP_ADV_ASYM_PAUSE |
2111                                MR_LP_ADV_REMOTE_FAULT1 |
2112                                MR_LP_ADV_REMOTE_FAULT2 |
2113                                MR_LP_ADV_NEXT_PAGE |
2114                                MR_TOGGLE_RX |
2115                                MR_NP_RX);
2116                 if (ap->rxconfig & ANEG_CFG_FD)
2117                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2118                 if (ap->rxconfig & ANEG_CFG_HD)
2119                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2120                 if (ap->rxconfig & ANEG_CFG_PS1)
2121                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2122                 if (ap->rxconfig & ANEG_CFG_PS2)
2123                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2124                 if (ap->rxconfig & ANEG_CFG_RF1)
2125                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2126                 if (ap->rxconfig & ANEG_CFG_RF2)
2127                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2128                 if (ap->rxconfig & ANEG_CFG_NP)
2129                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2130
2131                 ap->link_time = ap->cur_time;
2132
2133                 ap->flags ^= (MR_TOGGLE_TX);
2134                 if (ap->rxconfig & 0x0008)
2135                         ap->flags |= MR_TOGGLE_RX;
2136                 if (ap->rxconfig & ANEG_CFG_NP)
2137                         ap->flags |= MR_NP_RX;
2138                 ap->flags |= MR_PAGE_RX;
2139
2140                 ap->state = ANEG_STATE_COMPLETE_ACK;
2141                 ret = ANEG_TIMER_ENAB;
2142                 break;
2143
2144         case ANEG_STATE_COMPLETE_ACK:
2145                 if (ap->ability_match != 0 &&
2146                     ap->rxconfig == 0) {
2147                         ap->state = ANEG_STATE_AN_ENABLE;
2148                         break;
2149                 }
2150                 delta = ap->cur_time - ap->link_time;
2151                 if (delta > ANEG_STATE_SETTLE_TIME) {
2152                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2153                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2154                         } else {
2155                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2156                                     !(ap->flags & MR_NP_RX)) {
2157                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2158                                 } else {
2159                                         ret = ANEG_FAILED;
2160                                 }
2161                         }
2162                 }
2163                 break;
2164
2165         case ANEG_STATE_IDLE_DETECT_INIT:
2166                 ap->link_time = ap->cur_time;
2167                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2168                 tw32_f(MAC_MODE, tp->mac_mode);
2169                 udelay(40);
2170
2171                 ap->state = ANEG_STATE_IDLE_DETECT;
2172                 ret = ANEG_TIMER_ENAB;
2173                 break;
2174
2175         case ANEG_STATE_IDLE_DETECT:
2176                 if (ap->ability_match != 0 &&
2177                     ap->rxconfig == 0) {
2178                         ap->state = ANEG_STATE_AN_ENABLE;
2179                         break;
2180                 }
2181                 delta = ap->cur_time - ap->link_time;
2182                 if (delta > ANEG_STATE_SETTLE_TIME) {
2183                         /* XXX another gem from the Broadcom driver :( */
2184                         ap->state = ANEG_STATE_LINK_OK;
2185                 }
2186                 break;
2187
2188         case ANEG_STATE_LINK_OK:
2189                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2190                 ret = ANEG_DONE;
2191                 break;
2192
2193         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2194                 /* ??? unimplemented */
2195                 break;
2196
2197         case ANEG_STATE_NEXT_PAGE_WAIT:
2198                 /* ??? unimplemented */
2199                 break;
2200
2201         default:
2202                 ret = ANEG_FAILED;
2203                 break;
2204         }
2205
2206         return ret;
2207 }
2208
2209 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2210 {
2211         int res = 0;
2212         struct tg3_fiber_aneginfo aninfo;
2213         int status = ANEG_FAILED;
2214         unsigned int tick;
2215         u32 tmp;
2216
2217         tw32_f(MAC_TX_AUTO_NEG, 0);
2218
2219         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2220         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2221         udelay(40);
2222
2223         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2224         udelay(40);
2225
2226         memset(&aninfo, 0, sizeof(aninfo));
2227         aninfo.flags |= MR_AN_ENABLE;
2228         aninfo.state = ANEG_STATE_UNKNOWN;
2229         aninfo.cur_time = 0;
2230         tick = 0;
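             /* Step the state machine roughly once per microsecond,
              * for up to ~195 ms, until it completes or fails.
              */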
2231         while (++tick < 195000) {
2232                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2233                 if (status == ANEG_DONE || status == ANEG_FAILED)
2234                         break;
2235
2236                 udelay(1);
2237         }
2238
2239         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2240         tw32_f(MAC_MODE, tp->mac_mode);
2241         udelay(40);
2242
2243         *flags = aninfo.flags;
2244
2245         if (status == ANEG_DONE &&
2246             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2247                              MR_LP_ADV_FULL_DUPLEX)))
2248                 res = 1;
2249
2250         return res;
2251 }
2252
2253 static void tg3_init_bcm8002(struct tg3 *tp)
2254 {
2255         u32 mac_status = tr32(MAC_STATUS);
2256         int i;
2257
2258         /* Reset when initializing for the first time, or when we have a link. */
2259         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2260             !(mac_status & MAC_STATUS_PCS_SYNCED))
2261                 return;
2262
2263         /* Set PLL lock range. */
2264         tg3_writephy(tp, 0x16, 0x8007);
2265
2266         /* SW reset */
2267         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2268
2269         /* Wait for reset to complete. */
2270         /* XXX schedule_timeout() ... */
2271         for (i = 0; i < 500; i++)
2272                 udelay(10);
2273
2274         /* Config mode; select PMA/Ch 1 regs. */
2275         tg3_writephy(tp, 0x10, 0x8411);
2276
2277         /* Enable auto-lock and comdet, select txclk for tx. */
2278         tg3_writephy(tp, 0x11, 0x0a10);
2279
2280         tg3_writephy(tp, 0x18, 0x00a0);
2281         tg3_writephy(tp, 0x16, 0x41ff);
2282
2283         /* Assert and deassert POR. */
2284         tg3_writephy(tp, 0x13, 0x0400);
2285         udelay(40);
2286         tg3_writephy(tp, 0x13, 0x0000);
2287
2288         tg3_writephy(tp, 0x11, 0x0a50);
2289         udelay(40);
2290         tg3_writephy(tp, 0x11, 0x0a10);
2291
2292         /* Wait for signal to stabilize */
2293         /* XXX schedule_timeout() ... */
2294         for (i = 0; i < 15000; i++)
2295                 udelay(10);
2296
2297         /* Deselect the channel register so we can read the PHYID
2298          * later.
2299          */
2300         tg3_writephy(tp, 0x10, 0x8011);
2301 }
2302
2303 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2304 {
2305         u32 sg_dig_ctrl, sg_dig_status;
2306         u32 serdes_cfg, expected_sg_dig_ctrl;
2307         int workaround, port_a;
2308         int current_link_up;
2309
2310         serdes_cfg = 0;
2311         expected_sg_dig_ctrl = 0;
2312         workaround = 0;
2313         port_a = 1;
2314         current_link_up = 0;
2315
2316         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2317             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2318                 workaround = 1;
2319                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2320                         port_a = 0;
2321
2322                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2323                 /* preserve bits 20-23 for voltage regulator */
2324                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2325         }
2326
2327         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2328
2329         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
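                     /* Autoneg is not wanted; if SG_DIG_CTRL still looks
                      * autoneg-enabled (bit 31 set), fall back to the
                      * forced-mode setting.
                      */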
2330                 if (sg_dig_ctrl & (1 << 31)) {
2331                         if (workaround) {
2332                                 u32 val = serdes_cfg;
2333
2334                                 if (port_a)
2335                                         val |= 0xc010000;
2336                                 else
2337                                         val |= 0x4010000;
2338                                 tw32_f(MAC_SERDES_CFG, val);
2339                         }
2340                         tw32_f(SG_DIG_CTRL, 0x01388400);
2341                 }
2342                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2343                         tg3_setup_flow_control(tp, 0, 0);
2344                         current_link_up = 1;
2345                 }
2346                 goto out;
2347         }
2348
2349         /* Want auto-negotiation.  */
2350         expected_sg_dig_ctrl = 0x81388400;
2351
2352         /* Pause capability */
2353         expected_sg_dig_ctrl |= (1 << 11);
2354
2355                 /* Asymmetric pause */
2356         expected_sg_dig_ctrl |= (1 << 12);
2357
2358         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2359                 if (workaround)
2360                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2361                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2362                 udelay(5);
2363                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2364
2365                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2366         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2367                                  MAC_STATUS_SIGNAL_DET)) {
2368                 int i;
2369
2370                 /* Give it time to negotiate (~200 ms) */
2371                 for (i = 0; i < 40000; i++) {
2372                         sg_dig_status = tr32(SG_DIG_STATUS);
2373                         if (sg_dig_status & (0x3))
2374                                 break;
2375                         udelay(5);
2376                 }
2377                 mac_status = tr32(MAC_STATUS);
2378
2379                 if ((sg_dig_status & (1 << 1)) &&
2380                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2381                         u32 local_adv, remote_adv;
2382
2383                         local_adv = ADVERTISE_PAUSE_CAP;
2384                         remote_adv = 0;
2385                         if (sg_dig_status & (1 << 19))
2386                                 remote_adv |= LPA_PAUSE_CAP;
2387                         if (sg_dig_status & (1 << 20))
2388                                 remote_adv |= LPA_PAUSE_ASYM;
2389
2390                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2391                         current_link_up = 1;
2392                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2393                 } else if (!(sg_dig_status & (1 << 1))) {
2394                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2395                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2396                         else {
2397                                 if (workaround) {
2398                                         u32 val = serdes_cfg;
2399
2400                                         if (port_a)
2401                                                 val |= 0xc010000;
2402                                         else
2403                                                 val |= 0x4010000;
2404
2405                                         tw32_f(MAC_SERDES_CFG, val);
2406                                 }
2407
2408                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2409                                 udelay(40);
2410
2411                                 /* Link parallel detection - link is up
2412                                  * only if we have PCS_SYNC and are not
2413                                  * receiving config code words. */
2414                                 mac_status = tr32(MAC_STATUS);
2415                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2416                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2417                                         tg3_setup_flow_control(tp, 0, 0);
2418                                         current_link_up = 1;
2419                                 }
2420                         }
2421                 }
2422         }
2423
2424 out:
2425         return current_link_up;
2426 }
2427
2428 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2429 {
2430         int current_link_up = 0;
2431
2432         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2433                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2434                 goto out;
2435         }
2436
2437         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2438                 u32 flags;
2439                 int i;
2440
2441                 if (fiber_autoneg(tp, &flags)) {
2442                         u32 local_adv, remote_adv;
2443
2444                         local_adv = ADVERTISE_PAUSE_CAP;
2445                         remote_adv = 0;
2446                         if (flags & MR_LP_ADV_SYM_PAUSE)
2447                                 remote_adv |= LPA_PAUSE_CAP;
2448                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2449                                 remote_adv |= LPA_PAUSE_ASYM;
2450
2451                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2452
2453                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2454                         current_link_up = 1;
2455                 }
2456                 for (i = 0; i < 30; i++) {
2457                         udelay(20);
2458                         tw32_f(MAC_STATUS,
2459                                (MAC_STATUS_SYNC_CHANGED |
2460                                 MAC_STATUS_CFG_CHANGED));
2461                         udelay(40);
2462                         if ((tr32(MAC_STATUS) &
2463                              (MAC_STATUS_SYNC_CHANGED |
2464                               MAC_STATUS_CFG_CHANGED)) == 0)
2465                                 break;
2466                 }
2467
2468                 mac_status = tr32(MAC_STATUS);
2469                 if (current_link_up == 0 &&
2470                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2471                     !(mac_status & MAC_STATUS_RCVD_CFG))
2472                         current_link_up = 1;
2473         } else {
2474                 /* Forcing 1000FD link up. */
2475                 current_link_up = 1;
2476                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2477
2478                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2479                 udelay(40);
2480         }
2481
2482 out:
2483         return current_link_up;
2484 }
2485
2486 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2487 {
2488         u32 orig_pause_cfg;
2489         u16 orig_active_speed;
2490         u8 orig_active_duplex;
2491         u32 mac_status;
2492         int current_link_up;
2493         int i;
2494
2495         orig_pause_cfg =
2496                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2497                                   TG3_FLAG_TX_PAUSE));
2498         orig_active_speed = tp->link_config.active_speed;
2499         orig_active_duplex = tp->link_config.active_duplex;
2500
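             /* With software autoneg, skip the full setup when the
              * link is still up and no config changes have been
              * received since init completed.
              */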
2501         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2502             netif_carrier_ok(tp->dev) &&
2503             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2504                 mac_status = tr32(MAC_STATUS);
2505                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2506                                MAC_STATUS_SIGNAL_DET |
2507                                MAC_STATUS_CFG_CHANGED |
2508                                MAC_STATUS_RCVD_CFG);
2509                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2510                                    MAC_STATUS_SIGNAL_DET)) {
2511                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2512                                             MAC_STATUS_CFG_CHANGED));
2513                         return 0;
2514                 }
2515         }
2516
2517         tw32_f(MAC_TX_AUTO_NEG, 0);
2518
2519         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2520         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2521         tw32_f(MAC_MODE, tp->mac_mode);
2522         udelay(40);
2523
2524         if (tp->phy_id == PHY_ID_BCM8002)
2525                 tg3_init_bcm8002(tp);
2526
2527         /* Enable link change event even when serdes polling.  */
2528         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2529         udelay(40);
2530
2531         current_link_up = 0;
2532         mac_status = tr32(MAC_STATUS);
2533
2534         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2535                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2536         else
2537                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2538
2539         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2540         tw32_f(MAC_MODE, tp->mac_mode);
2541         udelay(40);
2542
2543         tp->hw_status->status =
2544                 (SD_STATUS_UPDATED |
2545                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2546
2547         for (i = 0; i < 100; i++) {
2548                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2549                                     MAC_STATUS_CFG_CHANGED));
2550                 udelay(5);
2551                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2552                                          MAC_STATUS_CFG_CHANGED)) == 0)
2553                         break;
2554         }
2555
2556         mac_status = tr32(MAC_STATUS);
2557         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2558                 current_link_up = 0;
2559                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2560                         tw32_f(MAC_MODE, (tp->mac_mode |
2561                                           MAC_MODE_SEND_CONFIGS));
2562                         udelay(1);
2563                         tw32_f(MAC_MODE, tp->mac_mode);
2564                 }
2565         }
2566
2567         if (current_link_up == 1) {
2568                 tp->link_config.active_speed = SPEED_1000;
2569                 tp->link_config.active_duplex = DUPLEX_FULL;
2570                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2571                                     LED_CTRL_LNKLED_OVERRIDE |
2572                                     LED_CTRL_1000MBPS_ON));
2573         } else {
2574                 tp->link_config.active_speed = SPEED_INVALID;
2575                 tp->link_config.active_duplex = DUPLEX_INVALID;
2576                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2577                                     LED_CTRL_LNKLED_OVERRIDE |
2578                                     LED_CTRL_TRAFFIC_OVERRIDE));
2579         }
2580
2581         if (current_link_up != netif_carrier_ok(tp->dev)) {
2582                 if (current_link_up)
2583                         netif_carrier_on(tp->dev);
2584                 else
2585                         netif_carrier_off(tp->dev);
2586                 tg3_link_report(tp);
2587         } else {
2588                 u32 now_pause_cfg =
2589                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2590                                          TG3_FLAG_TX_PAUSE);
2591                 if (orig_pause_cfg != now_pause_cfg ||
2592                     orig_active_speed != tp->link_config.active_speed ||
2593                     orig_active_duplex != tp->link_config.active_duplex)
2594                         tg3_link_report(tp);
2595         }
2596
2597         return 0;
2598 }
2599
2600 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2601 {
2602         int current_link_up, err = 0;
2603         u32 bmsr, bmcr;
2604         u16 current_speed;
2605         u8 current_duplex;
2606
2607         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2608         tw32_f(MAC_MODE, tp->mac_mode);
2609         udelay(40);
2610
2611         tw32(MAC_EVENT, 0);
2612
2613         tw32_f(MAC_STATUS,
2614              (MAC_STATUS_SYNC_CHANGED |
2615               MAC_STATUS_CFG_CHANGED |
2616               MAC_STATUS_MI_COMPLETION |
2617               MAC_STATUS_LNKSTATE_CHANGED));
2618         udelay(40);
2619
2620         if (force_reset)
2621                 tg3_phy_reset(tp);
2622
2623         current_link_up = 0;
2624         current_speed = SPEED_INVALID;
2625         current_duplex = DUPLEX_INVALID;
2626
2627         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2628         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2629
2630         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2631
2632         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2633             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2634                 /* do nothing, just check for link up at the end */
2635         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2636                 u32 adv, new_adv;
2637
2638                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2639                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2640                                   ADVERTISE_1000XPAUSE |
2641                                   ADVERTISE_1000XPSE_ASYM |
2642                                   ADVERTISE_SLCT);
2643
2644                 /* Always advertise symmetric PAUSE just like copper */
2645                 new_adv |= ADVERTISE_1000XPAUSE;
2646
2647                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2648                         new_adv |= ADVERTISE_1000XHALF;
2649                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2650                         new_adv |= ADVERTISE_1000XFULL;
2651
2652                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2653                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2654                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2655                         tg3_writephy(tp, MII_BMCR, bmcr);
2656
2657                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2658                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2659                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2660
2661                         return err;
2662                 }
2663         } else {
2664                 u32 new_bmcr;
2665
2666                 bmcr &= ~BMCR_SPEED1000;
2667                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2668
2669                 if (tp->link_config.duplex == DUPLEX_FULL)
2670                         new_bmcr |= BMCR_FULLDPLX;
2671
2672                 if (new_bmcr != bmcr) {
2673                         /* BMCR_SPEED1000 is a reserved bit that needs
2674                          * to be set on write.
2675                          */
2676                         new_bmcr |= BMCR_SPEED1000;
2677
2678                         /* Force a linkdown */
2679                         if (netif_carrier_ok(tp->dev)) {
2680                                 u32 adv;
2681
2682                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2683                                 adv &= ~(ADVERTISE_1000XFULL |
2684                                          ADVERTISE_1000XHALF |
2685                                          ADVERTISE_SLCT);
2686                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2687                                 tg3_writephy(tp, MII_BMCR, bmcr |
2688                                                            BMCR_ANRESTART |
2689                                                            BMCR_ANENABLE);
2690                                 udelay(10);
2691                                 netif_carrier_off(tp->dev);
2692                         }
2693                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2694                         bmcr = new_bmcr;
2695                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2696                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2698                 }
2699         }
2700
2701         if (bmsr & BMSR_LSTATUS) {
2702                 current_speed = SPEED_1000;
2703                 current_link_up = 1;
2704                 if (bmcr & BMCR_FULLDPLX)
2705                         current_duplex = DUPLEX_FULL;
2706                 else
2707                         current_duplex = DUPLEX_HALF;
2708
2709                 if (bmcr & BMCR_ANENABLE) {
2710                         u32 local_adv, remote_adv, common;
2711
2712                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2713                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2714                         common = local_adv & remote_adv;
2715                         if (common & (ADVERTISE_1000XHALF |
2716                                       ADVERTISE_1000XFULL)) {
2717                                 if (common & ADVERTISE_1000XFULL)
2718                                         current_duplex = DUPLEX_FULL;
2719                                 else
2720                                         current_duplex = DUPLEX_HALF;
2721
2722                                 tg3_setup_flow_control(tp, local_adv,
2723                                                        remote_adv);
2724                         }
2725                         else
2726                                 current_link_up = 0;
2727                 }
2728         }
2729
2730         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2731         if (tp->link_config.active_duplex == DUPLEX_HALF)
2732                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2733
2734         tw32_f(MAC_MODE, tp->mac_mode);
2735         udelay(40);
2736
2737         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2738
2739         tp->link_config.active_speed = current_speed;
2740         tp->link_config.active_duplex = current_duplex;
2741
2742         if (current_link_up != netif_carrier_ok(tp->dev)) {
2743                 if (current_link_up)
2744                         netif_carrier_on(tp->dev);
2745                 else {
2746                         netif_carrier_off(tp->dev);
2747                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2748                 }
2749                 tg3_link_report(tp);
2750         }
2751         return err;
2752 }
2753
2754 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2755 {
2756         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2757                 /* Give autoneg time to complete. */
2758                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2759                 return;
2760         }
2761         if (!netif_carrier_ok(tp->dev) &&
2762             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2763                 u32 bmcr;
2764
2765                 tg3_readphy(tp, MII_BMCR, &bmcr);
2766                 if (bmcr & BMCR_ANENABLE) {
2767                         u32 phy1, phy2;
2768
2769                         /* Select shadow register 0x1f */
2770                         tg3_writephy(tp, 0x1c, 0x7c00);
2771                         tg3_readphy(tp, 0x1c, &phy1);
2772
2773                         /* Select expansion interrupt status register */
2774                         tg3_writephy(tp, 0x17, 0x0f01);
2775                         tg3_readphy(tp, 0x15, &phy2);
2776                         tg3_readphy(tp, 0x15, &phy2);
2777
2778                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2779                                 /* We have signal detect and are not receiving
2780                                  * config code words, so the link is up by
2781                                  * parallel detection.
2782                                  */
2783
2784                                 bmcr &= ~BMCR_ANENABLE;
2785                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2786                                 tg3_writephy(tp, MII_BMCR, bmcr);
2787                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2788                         }
2789                 }
2790         }
2791         else if (netif_carrier_ok(tp->dev) &&
2792                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2793                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2794                 u32 phy2;
2795
2796                 /* Select expansion interrupt status register */
2797                 tg3_writephy(tp, 0x17, 0x0f01);
2798                 tg3_readphy(tp, 0x15, &phy2);
2799                 if (phy2 & 0x20) {
2800                         u32 bmcr;
2801
2802                         /* Config code words received, turn on autoneg. */
2803                         tg3_readphy(tp, MII_BMCR, &bmcr);
2804                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2805
2806                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2807
2808                 }
2809         }
2810 }
2811
2812 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2813 {
2814         int err;
2815
2816         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2817                 err = tg3_setup_fiber_phy(tp, force_reset);
2818         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2819                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2820         } else {
2821                 err = tg3_setup_copper_phy(tp, force_reset);
2822         }
2823
2824         if (tp->link_config.active_speed == SPEED_1000 &&
2825             tp->link_config.active_duplex == DUPLEX_HALF)
2826                 tw32(MAC_TX_LENGTHS,
2827                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2828                       (6 << TX_LENGTHS_IPG_SHIFT) |
2829                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2830         else
2831                 tw32(MAC_TX_LENGTHS,
2832                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2833                       (6 << TX_LENGTHS_IPG_SHIFT) |
2834                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2835
2836         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2837                 if (netif_carrier_ok(tp->dev)) {
2838                         tw32(HOSTCC_STAT_COAL_TICKS,
2839                              tp->coal.stats_block_coalesce_usecs);
2840                 } else {
2841                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2842                 }
2843         }
2844
2845         return err;
2846 }
2847
2848 /* Tigon3 never reports partial packet sends.  So we do not
2849  * need special logic to handle SKBs that have not had all
2850  * of their frags sent yet, like SunGEM does.
2851  */
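     /* Informal summary of the reclaim loop below: walk the TX ring from our
      * software consumer index up to the hardware consumer index published in
      * the status block, unmapping and freeing each completed skb, then wake
      * the queue if it was stopped and enough descriptors have been reclaimed.
      */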
2852 static void tg3_tx(struct tg3 *tp)
2853 {
2854         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2855         u32 sw_idx = tp->tx_cons;
2856
2857         while (sw_idx != hw_idx) {
2858                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2859                 struct sk_buff *skb = ri->skb;
2860                 int i;
2861
2862                 if (unlikely(skb == NULL))
2863                         BUG();
2864
2865                 pci_unmap_single(tp->pdev,
2866                                  pci_unmap_addr(ri, mapping),
2867                                  skb_headlen(skb),
2868                                  PCI_DMA_TODEVICE);
2869
2870                 ri->skb = NULL;
2871
2872                 sw_idx = NEXT_TX(sw_idx);
2873
2874                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2875                         if (unlikely(sw_idx == hw_idx))
2876                                 BUG();
2877
2878                         ri = &tp->tx_buffers[sw_idx];
2879                         if (unlikely(ri->skb != NULL))
2880                                 BUG();
2881
2882                         pci_unmap_page(tp->pdev,
2883                                        pci_unmap_addr(ri, mapping),
2884                                        skb_shinfo(skb)->frags[i].size,
2885                                        PCI_DMA_TODEVICE);
2886
2887                         sw_idx = NEXT_TX(sw_idx);
2888                 }
2889
2890                 dev_kfree_skb(skb);
2891         }
2892
2893         tp->tx_cons = sw_idx;
2894
2895         if (unlikely(netif_queue_stopped(tp->dev))) {
2896                 spin_lock(&tp->tx_lock);
2897                 if (netif_queue_stopped(tp->dev) &&
2898                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2899                         netif_wake_queue(tp->dev);
2900                 spin_unlock(&tp->tx_lock);
2901         }
2902 }
2903
2904 /* Returns size of skb allocated or < 0 on error.
2905  *
2906  * We only need to fill in the address because the other members
2907  * of the RX descriptor are invariant, see tg3_init_rings.
2908  *
2909  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2910  * posting buffers we only dirty the first cache line of the RX
2911  * descriptor (containing the address).  Whereas for the RX status
2912  * buffers the cpu only reads the last cacheline of the RX descriptor
2913  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2914  */
2915 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2916                             int src_idx, u32 dest_idx_unmasked)
2917 {
2918         struct tg3_rx_buffer_desc *desc;
2919         struct ring_info *map, *src_map;
2920         struct sk_buff *skb;
2921         dma_addr_t mapping;
2922         int skb_size, dest_idx;
2923
2924         src_map = NULL;
2925         switch (opaque_key) {
2926         case RXD_OPAQUE_RING_STD:
2927                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2928                 desc = &tp->rx_std[dest_idx];
2929                 map = &tp->rx_std_buffers[dest_idx];
2930                 if (src_idx >= 0)
2931                         src_map = &tp->rx_std_buffers[src_idx];
2932                 skb_size = tp->rx_pkt_buf_sz;
2933                 break;
2934
2935         case RXD_OPAQUE_RING_JUMBO:
2936                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2937                 desc = &tp->rx_jumbo[dest_idx];
2938                 map = &tp->rx_jumbo_buffers[dest_idx];
2939                 if (src_idx >= 0)
2940                         src_map = &tp->rx_jumbo_buffers[src_idx];
2941                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2942                 break;
2943
2944         default:
2945                 return -EINVAL;
2946         }
2947
2948         /* Do not overwrite any of the map or rp information
2949          * until we are sure we can commit to a new buffer.
2950          *
2951          * Callers depend upon this behavior and assume that
2952          * we leave everything unchanged if we fail.
2953          */
2954         skb = dev_alloc_skb(skb_size);
2955         if (skb == NULL)
2956                 return -ENOMEM;
2957
2958         skb->dev = tp->dev;
2959         skb_reserve(skb, tp->rx_offset);
2960
2961         mapping = pci_map_single(tp->pdev, skb->data,
2962                                  skb_size - tp->rx_offset,
2963                                  PCI_DMA_FROMDEVICE);
2964
2965         map->skb = skb;
2966         pci_unmap_addr_set(map, mapping, mapping);
2967
2968         if (src_map != NULL)
2969                 src_map->skb = NULL;
2970
2971         desc->addr_hi = ((u64)mapping >> 32);
2972         desc->addr_lo = ((u64)mapping & 0xffffffff);
2973
2974         return skb_size;
2975 }
2976
2977 /* We only need to move over in the address because the other
2978  * members of the RX descriptor are invariant.  See notes above
2979  * tg3_alloc_rx_skb for full details.
2980  */
2981 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2982                            int src_idx, u32 dest_idx_unmasked)
2983 {
2984         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2985         struct ring_info *src_map, *dest_map;
2986         int dest_idx;
2987
2988         switch (opaque_key) {
2989         case RXD_OPAQUE_RING_STD:
2990                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2991                 dest_desc = &tp->rx_std[dest_idx];
2992                 dest_map = &tp->rx_std_buffers[dest_idx];
2993                 src_desc = &tp->rx_std[src_idx];
2994                 src_map = &tp->rx_std_buffers[src_idx];
2995                 break;
2996
2997         case RXD_OPAQUE_RING_JUMBO:
2998                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2999                 dest_desc = &tp->rx_jumbo[dest_idx];
3000                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3001                 src_desc = &tp->rx_jumbo[src_idx];
3002                 src_map = &tp->rx_jumbo_buffers[src_idx];
3003                 break;
3004
3005         default:
3006                 return;
3007         }
3008
3009         dest_map->skb = src_map->skb;
3010         pci_unmap_addr_set(dest_map, mapping,
3011                            pci_unmap_addr(src_map, mapping));
3012         dest_desc->addr_hi = src_desc->addr_hi;
3013         dest_desc->addr_lo = src_desc->addr_lo;
3014
3015         src_map->skb = NULL;
3016 }
3017
3018 #if TG3_VLAN_TAG_USED
3019 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3020 {
3021         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3022 }
3023 #endif
3024
3025 /* The RX ring scheme is composed of multiple rings which post fresh
3026  * buffers to the chip, and one special ring the chip uses to report
3027  * status back to the host.
3028  *
3029  * The special ring reports the status of received packets to the
3030  * host.  The chip does not write into the original descriptor the
3031  * RX buffer was obtained from.  The chip simply takes the original
3032  * descriptor as provided by the host, updates the status and length
3033  * field, then writes this into the next status ring entry.
3034  *
3035  * Each ring the host uses to post buffers to the chip is described
3036  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3037  * it is first placed into the on-chip ram.  When the packet's length
3038  * is known, it walks down the TG3_BDINFO entries to select the ring.
3039  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3040  * whose MAXLEN covers the new packet's length is chosen.
3041  *
3042  * The "separate ring for rx status" scheme may sound queer, but it makes
3043  * sense from a cache coherency perspective.  If only the host writes
3044  * to the buffer post rings, and only the chip writes to the rx status
3045  * rings, then cache lines never move beyond shared-modified state.
3046  * If both the host and chip were to write into the same ring, cache line
3047  * eviction could occur since both entities want it in an exclusive state.
3048  */
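     /* In this driver the host-posted rings are tp->rx_std and tp->rx_jumbo,
      * and the chip-written status ring is tp->rx_rcb; tg3_rx() below walks
      * rx_rcb within the NAPI budget and then refills whichever posting rings
      * produced work (a rough mapping of the scheme described above onto the
      * data structures used here).
      */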
3049 static int tg3_rx(struct tg3 *tp, int budget)
3050 {
3051         u32 work_mask;
3052         u32 sw_idx = tp->rx_rcb_ptr;
3053         u16 hw_idx;
3054         int received;
3055
3056         hw_idx = tp->hw_status->idx[0].rx_producer;
3057         /*
3058          * We need to order the read of hw_idx and the read of
3059          * the opaque cookie.
3060          */
3061         rmb();
3062         work_mask = 0;
3063         received = 0;
3064         while (sw_idx != hw_idx && budget > 0) {
3065                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3066                 unsigned int len;
3067                 struct sk_buff *skb;
3068                 dma_addr_t dma_addr;
3069                 u32 opaque_key, desc_idx, *post_ptr;
3070
3071                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3072                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3073                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3074                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3075                                                   mapping);
3076                         skb = tp->rx_std_buffers[desc_idx].skb;
3077                         post_ptr = &tp->rx_std_ptr;
3078                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3079                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3080                                                   mapping);
3081                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3082                         post_ptr = &tp->rx_jumbo_ptr;
3083                 }
3084                 else {
3085                         goto next_pkt_nopost;
3086                 }
3087
3088                 work_mask |= opaque_key;
3089
3090                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3091                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3092                 drop_it:
3093                         tg3_recycle_rx(tp, opaque_key,
3094                                        desc_idx, *post_ptr);
3095                 drop_it_no_recycle:
3096                         /* Other statistics are kept track of by the card. */
3097                         tp->net_stats.rx_dropped++;
3098                         goto next_pkt;
3099                 }
3100
3101                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3102
3103                 /* rx_offset != 2 iff this is a 5701 card running
3104                  * in PCI-X mode [see tg3_get_invariants()].
3105                  */
3106                 if (len > RX_COPY_THRESHOLD &&
3107                     tp->rx_offset == 2) {
3108                         int skb_size;
3109
3110                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3111                                                     desc_idx, *post_ptr);
3112                         if (skb_size < 0)
3113                                 goto drop_it;
3114
3115                         pci_unmap_single(tp->pdev, dma_addr,
3116                                          skb_size - tp->rx_offset,
3117                                          PCI_DMA_FROMDEVICE);
3118
3119                         skb_put(skb, len);
3120                 } else {
3121                         struct sk_buff *copy_skb;
3122
3123                         tg3_recycle_rx(tp, opaque_key,
3124                                        desc_idx, *post_ptr);
3125
3126                         copy_skb = dev_alloc_skb(len + 2);
3127                         if (copy_skb == NULL)
3128                                 goto drop_it_no_recycle;
3129
3130                         copy_skb->dev = tp->dev;
3131                         skb_reserve(copy_skb, 2);
3132                         skb_put(copy_skb, len);
3133                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3134                         memcpy(copy_skb->data, skb->data, len);
3135                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3136
3137                         /* We'll reuse the original ring buffer. */
3138                         skb = copy_skb;
3139                 }
3140
3141                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3142                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3143                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3144                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3145                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3146                 else
3147                         skb->ip_summed = CHECKSUM_NONE;
3148
3149                 skb->protocol = eth_type_trans(skb, tp->dev);
3150 #if TG3_VLAN_TAG_USED
3151                 if (tp->vlgrp != NULL &&
3152                     desc->type_flags & RXD_FLAG_VLAN) {
3153                         tg3_vlan_rx(tp, skb,
3154                                     desc->err_vlan & RXD_VLAN_MASK);
3155                 } else
3156 #endif
3157                         netif_receive_skb(skb);
3158
3159                 tp->dev->last_rx = jiffies;
3160                 received++;
3161                 budget--;
3162
3163 next_pkt:
3164                 (*post_ptr)++;
3165 next_pkt_nopost:
3166                 sw_idx++;
3167                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3168
3169                 /* Refresh hw_idx to see if there is new work */
3170                 if (sw_idx == hw_idx) {
3171                         hw_idx = tp->hw_status->idx[0].rx_producer;
3172                         rmb();
3173                 }
3174         }
3175
3176         /* ACK the status ring. */
3177         tp->rx_rcb_ptr = sw_idx;
3178         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3179
3180         /* Refill RX ring(s). */
3181         if (work_mask & RXD_OPAQUE_RING_STD) {
3182                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3183                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3184                              sw_idx);
3185         }
3186         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3187                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3188                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3189                              sw_idx);
3190         }
3191         mmiowb();
3192
3193         return received;
3194 }
3195
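     /* NAPI poll callback: handles link-change events reported in the status
      * block, TX completion and RX within the given budget, and returns 0 only
      * after netif_rx_complete() and tg3_restart_ints() once no work remains;
      * otherwise it returns 1 so the stack keeps polling.
      */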
3196 static int tg3_poll(struct net_device *netdev, int *budget)
3197 {
3198         struct tg3 *tp = netdev_priv(netdev);
3199         struct tg3_hw_status *sblk = tp->hw_status;
3200         int done;
3201
3202         /* handle link change and other phy events */
3203         if (!(tp->tg3_flags &
3204               (TG3_FLAG_USE_LINKCHG_REG |
3205                TG3_FLAG_POLL_SERDES))) {
3206                 if (sblk->status & SD_STATUS_LINK_CHG) {
3207                         sblk->status = SD_STATUS_UPDATED |
3208                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3209                         spin_lock(&tp->lock);
3210                         tg3_setup_phy(tp, 0);
3211                         spin_unlock(&tp->lock);
3212                 }
3213         }
3214
3215         /* run TX completion thread */
3216         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3217                 tg3_tx(tp);
3218         }
3219
3220         /* run RX thread, within the bounds set by NAPI.
3221          * All RX "locking" is done by ensuring outside
3222          * code synchronizes with dev->poll()
3223          */
3224         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3225                 int orig_budget = *budget;
3226                 int work_done;
3227
3228                 if (orig_budget > netdev->quota)
3229                         orig_budget = netdev->quota;
3230
3231                 work_done = tg3_rx(tp, orig_budget);
3232
3233                 *budget -= work_done;
3234                 netdev->quota -= work_done;
3235         }
3236
3237         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3238                 tp->last_tag = sblk->status_tag;
3239                 rmb();
3240         } else
3241                 sblk->status &= ~SD_STATUS_UPDATED;
3242
3243         /* if no more work, tell net stack and NIC we're done */
3244         done = !tg3_has_work(tp);
3245         if (done) {
3246                 netif_rx_complete(netdev);
3247                 tg3_restart_ints(tp);
3248         }
3249
3250         return (done ? 0 : 1);
3251 }
3252
3253 static void tg3_irq_quiesce(struct tg3 *tp)
3254 {
3255         BUG_ON(tp->irq_sync);
3256
3257         tp->irq_sync = 1;
3258         smp_mb();
3259
3260         synchronize_irq(tp->pdev->irq);
3261 }
3262
3263 static inline int tg3_irq_sync(struct tg3 *tp)
3264 {
3265         return tp->irq_sync;
3266 }
3267
3268 /* Fully shut down all tg3 driver activity elsewhere in the system.
3269  * If irq_sync is non-zero, then the IRQ handler is synchronized with
3270  * as well.  Most of the time this is only necessary when shutting
3271  * down the device.
3272  */
3273 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3274 {
3275         if (irq_sync)
3276                 tg3_irq_quiesce(tp);
3277         spin_lock_bh(&tp->lock);
3278         spin_lock(&tp->tx_lock);
3279 }
3280
3281 static inline void tg3_full_unlock(struct tg3 *tp)
3282 {
3283         spin_unlock(&tp->tx_lock);
3284         spin_unlock_bh(&tp->lock);
3285 }
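
     /* A typical full-lock sequence in this file (see tg3_reset_task() and
      * tg3_change_mtu()) looks roughly like:
      *
      *      tg3_netif_stop(tp);
      *      tg3_full_lock(tp, 1);
      *      ... halt and re-init the hardware ...
      *      tg3_netif_start(tp);
      *      tg3_full_unlock(tp);
      */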
3286
3287 /* MSI ISR - No need to check for interrupt sharing and no need to
3288  * flush status block and interrupt mailbox. PCI ordering rules
3289  * guarantee that MSI will arrive after the status block.
3290  */
3291 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3292 {
3293         struct net_device *dev = dev_id;
3294         struct tg3 *tp = netdev_priv(dev);
3295
3296         prefetch(tp->hw_status);
3297         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3298         /*
3299          * Writing any value to intr-mbox-0 clears PCI INTA# and
3300          * chip-internal interrupt pending events.
3301          * Writing non-zero to intr-mbox-0 additionally tells the
3302          * NIC to stop sending us irqs, engaging "in-intr-handler"
3303          * event coalescing.
3304          */
3305         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3306         if (likely(!tg3_irq_sync(tp)))
3307                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3308
3309         return IRQ_RETVAL(1);
3310 }
3311
3312 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3313 {
3314         struct net_device *dev = dev_id;
3315         struct tg3 *tp = netdev_priv(dev);
3316         struct tg3_hw_status *sblk = tp->hw_status;
3317         unsigned int handled = 1;
3318
3319         /* In INTx mode, it is possible for the interrupt to arrive at the
3320          * CPU before the status block posted prior to it is visible.
3321          * Reading the PCI State register will confirm whether the
3322          * interrupt is ours and will flush the status block.
3323          */
3324         if ((sblk->status & SD_STATUS_UPDATED) ||
3325             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3326                 /*
3327                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3328                  * chip-internal interrupt pending events.
3329                  * Writing non-zero to intr-mbox-0 additionally tells the
3330                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3331                  * event coalescing.
3332                  */
3333                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3334                              0x00000001);
3335                 if (tg3_irq_sync(tp))
3336                         goto out;
3337                 sblk->status &= ~SD_STATUS_UPDATED;
3338                 if (likely(tg3_has_work(tp))) {
3339                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3340                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3341                 } else {
3342                         /* No work, shared interrupt perhaps?  Re-enable
3343                          * interrupts and flush that PCI write.
3344                          */
3345                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3346                                 0x00000000);
3347                 }
3348         } else {        /* shared interrupt */
3349                 handled = 0;
3350         }
3351 out:
3352         return IRQ_RETVAL(handled);
3353 }
3354
3355 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3356 {
3357         struct net_device *dev = dev_id;
3358         struct tg3 *tp = netdev_priv(dev);
3359         struct tg3_hw_status *sblk = tp->hw_status;
3360         unsigned int handled = 1;
3361
3362         /* In INTx mode, it is possible for the interrupt to arrive at the
3363          * CPU before the status block posted prior to it is visible.
3364          * Reading the PCI State register will confirm whether the
3365          * interrupt is ours and will flush the status block.
3366          */
3367         if ((sblk->status_tag != tp->last_tag) ||
3368             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3369                 /*
3370                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3371                  * chip-internal interrupt pending events.
3372                  * Writing non-zero to intr-mbox-0 additionally tells the
3373                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3374                  * event coalescing.
3375                  */
3376                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3377                              0x00000001);
3378                 if (tg3_irq_sync(tp))
3379                         goto out;
3380                 if (netif_rx_schedule_prep(dev)) {
3381                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3382                         /* Update last_tag to mark that this status has been
3383                          * seen. Because interrupt may be shared, we may be
3384                          * racing with tg3_poll(), so only update last_tag
3385                          * if tg3_poll() is not scheduled.
3386                          */
3387                         tp->last_tag = sblk->status_tag;
3388                         __netif_rx_schedule(dev);
3389                 }
3390         } else {        /* shared interrupt */
3391                 handled = 0;
3392         }
3393 out:
3394         return IRQ_RETVAL(handled);
3395 }
3396
3397 /* ISR for interrupt test */
3398 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3399                 struct pt_regs *regs)
3400 {
3401         struct net_device *dev = dev_id;
3402         struct tg3 *tp = netdev_priv(dev);
3403         struct tg3_hw_status *sblk = tp->hw_status;
3404
3405         if ((sblk->status & SD_STATUS_UPDATED) ||
3406             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3407                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3408                              0x00000001);
3409                 return IRQ_RETVAL(1);
3410         }
3411         return IRQ_RETVAL(0);
3412 }
3413
3414 static int tg3_init_hw(struct tg3 *);
3415 static int tg3_halt(struct tg3 *, int, int);
3416
3417 #ifdef CONFIG_NET_POLL_CONTROLLER
3418 static void tg3_poll_controller(struct net_device *dev)
3419 {
3420         struct tg3 *tp = netdev_priv(dev);
3421
3422         tg3_interrupt(tp->pdev->irq, dev, NULL);
3423 }
3424 #endif
3425
3426 static void tg3_reset_task(void *_data)
3427 {
3428         struct tg3 *tp = _data;
3429         unsigned int restart_timer;
3430
3431         tg3_netif_stop(tp);
3432
3433         tg3_full_lock(tp, 1);
3434
3435         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3436         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3437
3438         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3439         tg3_init_hw(tp);
3440
3441         tg3_netif_start(tp);
3442
3443         tg3_full_unlock(tp);
3444
3445         if (restart_timer)
3446                 mod_timer(&tp->timer, jiffies + 1);
3447 }
3448
3449 static void tg3_tx_timeout(struct net_device *dev)
3450 {
3451         struct tg3 *tp = netdev_priv(dev);
3452
3453         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3454                dev->name);
3455
3456         schedule_work(&tp->reset_task);
3457 }
3458
3459 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
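     /* For example (a rough illustration): a 32-bit base of 0xffffff00 with
      * len == 512 gives base + len + 8 == 0x100000108, which wraps below base
      * when truncated to 32 bits, so such a buffer would straddle a 4GB
      * boundary and is bounced through tigon3_4gb_hwbug_workaround() below.
      */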
3460 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3461 {
3462         u32 base = (u32) mapping & 0xffffffff;
3463
3464         return ((base > 0xffffdcc0) &&
3465                 (base + len + 8 < base));
3466 }
3467
3468 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3469
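     /* Work around the 4GB-crossing DMA bug by copying the offending skb into
      * a freshly allocated linear skb, mapping that instead, and unmapping the
      * original fragments.  Returns 0 on success, -1 if the copy fails or the
      * new mapping also crosses a boundary (the caller then silently drops
      * the packet).
      */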
3470 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3471                                        u32 last_plus_one, u32 *start,
3472                                        u32 base_flags, u32 mss)
3473 {
3474         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3475         dma_addr_t new_addr = 0;
3476         u32 entry = *start;
3477         int i, ret = 0;
3478
3479         if (!new_skb) {
3480                 ret = -1;
3481         } else {
3482                 /* New SKB is guaranteed to be linear. */
3483                 entry = *start;
3484                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3485                                           PCI_DMA_TODEVICE);
3486                 /* Make sure new skb does not cross any 4G boundaries.
3487                  * Drop the packet if it does.
3488                  */
3489                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3490                         ret = -1;
3491                         dev_kfree_skb(new_skb);
3492                         new_skb = NULL;
3493                 } else {
3494                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3495                                     base_flags, 1 | (mss << 1));
3496                         *start = NEXT_TX(entry);
3497                 }
3498         }
3499
3500         /* Now clean up the sw ring entries. */
3501         i = 0;
3502         while (entry != last_plus_one) {
3503                 int len;
3504
3505                 if (i == 0)
3506                         len = skb_headlen(skb);
3507                 else
3508                         len = skb_shinfo(skb)->frags[i-1].size;
3509                 pci_unmap_single(tp->pdev,
3510                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3511                                  len, PCI_DMA_TODEVICE);
3512                 if (i == 0) {
3513                         tp->tx_buffers[entry].skb = new_skb;
3514                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3515                 } else {
3516                         tp->tx_buffers[entry].skb = NULL;
3517                 }
3518                 entry = NEXT_TX(entry);
3519                 i++;
3520         }
3521
3522         dev_kfree_skb(skb);
3523
3524         return ret;
3525 }
3526
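     /* Fill in one TX descriptor.  Bit 0 of mss_and_is_end flags the final
      * fragment of a packet and the remaining bits carry the TSO MSS, e.g.
      * callers pass (i == last) | (mss << 1) as in tg3_start_xmit() below.
      */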
3527 static void tg3_set_txd(struct tg3 *tp, int entry,
3528                         dma_addr_t mapping, int len, u32 flags,
3529                         u32 mss_and_is_end)
3530 {
3531         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3532         int is_end = (mss_and_is_end & 0x1);
3533         u32 mss = (mss_and_is_end >> 1);
3534         u32 vlan_tag = 0;
3535
3536         if (is_end)
3537                 flags |= TXD_FLAG_END;
3538         if (flags & TXD_FLAG_VLAN) {
3539                 vlan_tag = flags >> 16;
3540                 flags &= 0xffff;
3541         }
3542         vlan_tag |= (mss << TXD_MSS_SHIFT);
3543
3544         txd->addr_hi = ((u64) mapping >> 32);
3545         txd->addr_lo = ((u64) mapping & 0xffffffff);
3546         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3547         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3548 }
3549
3550 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3551 {
3552         struct tg3 *tp = netdev_priv(dev);
3553         dma_addr_t mapping;
3554         u32 len, entry, base_flags, mss;
3555         int would_hit_hwbug;
3556
3557         len = skb_headlen(skb);
3558
3559         /* No BH disabling for tx_lock here.  We are running in BH disabled
3560          * context and TX reclaim runs via tp->poll inside of a software
3561          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3562          * no IRQ context deadlocks to worry about either.  Rejoice!
3563          */
3564         if (!spin_trylock(&tp->tx_lock))
3565                 return NETDEV_TX_LOCKED; 
3566
3567         /* This is a hard error, log it. */
3568         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3569                 netif_stop_queue(dev);
3570                 spin_unlock(&tp->tx_lock);
3571                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3572                        dev->name);
3573                 return NETDEV_TX_BUSY;
3574         }
3575
3576         entry = tp->tx_prod;
3577         base_flags = 0;
3578         if (skb->ip_summed == CHECKSUM_HW)
3579                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3580 #if TG3_TSO_SUPPORT != 0
3581         mss = 0;
3582         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3583             (mss = skb_shinfo(skb)->tso_size) != 0) {
3584                 int tcp_opt_len, ip_tcp_len;
3585
3586                 if (skb_header_cloned(skb) &&
3587                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3588                         dev_kfree_skb(skb);
3589                         goto out_unlock;
3590                 }
3591
3592                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3593                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3594
3595                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3596                                TXD_FLAG_CPU_POST_DMA);
3597
3598                 skb->nh.iph->check = 0;
3599                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3600                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3601                         skb->h.th->check = 0;
3602                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3603                 }
3604                 else {
3605                         skb->h.th->check =
3606                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3607                                                    skb->nh.iph->daddr,
3608                                                    0, IPPROTO_TCP, 0);
3609                 }
3610
3611                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3612                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3613                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3614                                 int tsflags;
3615
3616                                 tsflags = ((skb->nh.iph->ihl - 5) +
3617                                            (tcp_opt_len >> 2));
3618                                 mss |= (tsflags << 11);
3619                         }
3620                 } else {
3621                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3622                                 int tsflags;
3623
3624                                 tsflags = ((skb->nh.iph->ihl - 5) +
3625                                            (tcp_opt_len >> 2));
3626                                 base_flags |= tsflags << 12;
3627                         }
3628                 }
3629         }
3630 #else
3631         mss = 0;
3632 #endif
3633 #if TG3_VLAN_TAG_USED
3634         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3635                 base_flags |= (TXD_FLAG_VLAN |
3636                                (vlan_tx_tag_get(skb) << 16));
3637 #endif
3638
3639         /* Queue skb data, a.k.a. the main skb fragment. */
3640         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3641
3642         tp->tx_buffers[entry].skb = skb;
3643         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3644
3645         would_hit_hwbug = 0;
3646
3647         if (tg3_4g_overflow_test(mapping, len))
3648                 would_hit_hwbug = 1;
3649
3650         tg3_set_txd(tp, entry, mapping, len, base_flags,
3651                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3652
3653         entry = NEXT_TX(entry);
3654
3655         /* Now loop through additional data fragments, and queue them. */
3656         if (skb_shinfo(skb)->nr_frags > 0) {
3657                 unsigned int i, last;
3658
3659                 last = skb_shinfo(skb)->nr_frags - 1;
3660                 for (i = 0; i <= last; i++) {
3661                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3662
3663                         len = frag->size;
3664                         mapping = pci_map_page(tp->pdev,
3665                                                frag->page,
3666                                                frag->page_offset,
3667                                                len, PCI_DMA_TODEVICE);
3668
3669                         tp->tx_buffers[entry].skb = NULL;
3670                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3671
3672                         if (tg3_4g_overflow_test(mapping, len))
3673                                 would_hit_hwbug = 1;
3674
3675                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3676                                 tg3_set_txd(tp, entry, mapping, len,
3677                                             base_flags, (i == last)|(mss << 1));
3678                         else
3679                                 tg3_set_txd(tp, entry, mapping, len,
3680                                             base_flags, (i == last));
3681
3682                         entry = NEXT_TX(entry);
3683                 }
3684         }
3685
3686         if (would_hit_hwbug) {
3687                 u32 last_plus_one = entry;
3688                 u32 start;
3689
3690                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3691                 start &= (TG3_TX_RING_SIZE - 1);
3692
3693                 /* If the workaround fails due to memory/mapping
3694                  * failure, silently drop this packet.
3695                  */
3696                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3697                                                 &start, base_flags, mss))
3698                         goto out_unlock;
3699
3700                 entry = start;
3701         }
3702
3703         /* Packets are ready, update Tx producer idx local and on card. */
3704         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3705
3706         tp->tx_prod = entry;
3707         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3708                 netif_stop_queue(dev);
3709                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3710                         netif_wake_queue(tp->dev);
3711         }
3712
3713 out_unlock:
3714         mmiowb();
3715         spin_unlock(&tp->tx_lock);
3716
3717         dev->trans_start = jiffies;
3718
3719         return NETDEV_TX_OK;
3720 }
3721
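     /* Update the software MTU state.  For an MTU above ETH_DATA_LEN,
      * 5780-class parts drop TSO and use larger standard-ring buffers (see
      * tg3_init_rings()), while other chips enable the separate jumbo RX ring;
      * at or below ETH_DATA_LEN the standard setup is restored.  Reprogramming
      * the hardware is left to the caller, e.g. tg3_change_mtu() below.
      */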
3722 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3723                                int new_mtu)
3724 {
3725         dev->mtu = new_mtu;
3726
3727         if (new_mtu > ETH_DATA_LEN) {
3728                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3729                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3730                         ethtool_op_set_tso(dev, 0);
3731                 }
3732                 else
3733                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3734         } else {
3735                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3736                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3737                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3738         }
3739 }
3740
3741 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3742 {
3743         struct tg3 *tp = netdev_priv(dev);
3744
3745         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3746                 return -EINVAL;
3747
3748         if (!netif_running(dev)) {
3749                 /* We'll just catch it later when the
3750                  * device is brought up.
3751                  */
3752                 tg3_set_mtu(dev, tp, new_mtu);
3753                 return 0;
3754         }
3755
3756         tg3_netif_stop(tp);
3757
3758         tg3_full_lock(tp, 1);
3759
3760         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3761
3762         tg3_set_mtu(dev, tp, new_mtu);
3763
3764         tg3_init_hw(tp);
3765
3766         tg3_netif_start(tp);
3767
3768         tg3_full_unlock(tp);
3769
3770         return 0;
3771 }
3772
3773 /* Free up pending packets in all rx/tx rings.
3774  *
3775  * The chip has been shut down and the driver detached from
3776  * the networking stack, so no interrupts or new tx packets will
3777  * end up in the driver.  tp->{tx,}lock is not held and we are not
3778  * in an interrupt context and thus may sleep.
3779  */
3780 static void tg3_free_rings(struct tg3 *tp)
3781 {
3782         struct ring_info *rxp;
3783         int i;
3784
3785         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3786                 rxp = &tp->rx_std_buffers[i];
3787
3788                 if (rxp->skb == NULL)
3789                         continue;
3790                 pci_unmap_single(tp->pdev,
3791                                  pci_unmap_addr(rxp, mapping),
3792                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3793                                  PCI_DMA_FROMDEVICE);
3794                 dev_kfree_skb_any(rxp->skb);
3795                 rxp->skb = NULL;
3796         }
3797
3798         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3799                 rxp = &tp->rx_jumbo_buffers[i];
3800
3801                 if (rxp->skb == NULL)
3802                         continue;
3803                 pci_unmap_single(tp->pdev,
3804                                  pci_unmap_addr(rxp, mapping),
3805                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3806                                  PCI_DMA_FROMDEVICE);
3807                 dev_kfree_skb_any(rxp->skb);
3808                 rxp->skb = NULL;
3809         }
3810
3811         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3812                 struct tx_ring_info *txp;
3813                 struct sk_buff *skb;
3814                 int j;
3815
3816                 txp = &tp->tx_buffers[i];
3817                 skb = txp->skb;
3818
3819                 if (skb == NULL) {
3820                         i++;
3821                         continue;
3822                 }
3823
3824                 pci_unmap_single(tp->pdev,
3825                                  pci_unmap_addr(txp, mapping),
3826                                  skb_headlen(skb),
3827                                  PCI_DMA_TODEVICE);
3828                 txp->skb = NULL;
3829
3830                 i++;
3831
3832                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3833                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3834                         pci_unmap_page(tp->pdev,
3835                                        pci_unmap_addr(txp, mapping),
3836                                        skb_shinfo(skb)->frags[j].size,
3837                                        PCI_DMA_TODEVICE);
3838                         i++;
3839                 }
3840
3841                 dev_kfree_skb_any(skb);
3842         }
3843 }
3844
3845 /* Initialize tx/rx rings for packet processing.
3846  *
3847  * The chip has been shut down and the driver detached from
3848  * the networking stack, so no interrupts or new tx packets will
3849  * end up in the driver.  tp->{tx,}lock are held and thus
3850  * we may not sleep.
3851  */
3852 static void tg3_init_rings(struct tg3 *tp)
3853 {
3854         u32 i;
3855
3856         /* Free up all the SKBs. */
3857         tg3_free_rings(tp);
3858
3859         /* Zero out all descriptors. */
3860         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3861         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3862         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3863         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3864
3865         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3866         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3867             (tp->dev->mtu > ETH_DATA_LEN))
3868                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3869
3870         /* Initialize invariants of the rings; we only set this
3871          * stuff once.  This works because the card does not
3872          * write into the rx buffer posting rings.
3873          */
3874         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3875                 struct tg3_rx_buffer_desc *rxd;
3876
3877                 rxd = &tp->rx_std[i];
3878                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3879                         << RXD_LEN_SHIFT;
3880                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3881                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3882                                (i << RXD_OPAQUE_INDEX_SHIFT));
3883         }
3884
3885         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3886                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3887                         struct tg3_rx_buffer_desc *rxd;
3888
3889                         rxd = &tp->rx_jumbo[i];
3890                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3891                                 << RXD_LEN_SHIFT;
3892                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3893                                 RXD_FLAG_JUMBO;
3894                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3895                                (i << RXD_OPAQUE_INDEX_SHIFT));
3896                 }
3897         }
3898
3899         /* Now allocate fresh SKBs for each rx ring. */
3900         for (i = 0; i < tp->rx_pending; i++) {
3901                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3902                                      -1, i) < 0)
3903                         break;
3904         }
3905
3906         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3907                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3908                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3909                                              -1, i) < 0)
3910                                 break;
3911                 }
3912         }
3913 }
3914
3915 /*
3916  * Must not be invoked with interrupt sources disabled and
3917  * the hardware shut down.
3918  */
3919 static void tg3_free_consistent(struct tg3 *tp)
3920 {
3921         kfree(tp->rx_std_buffers);
3922         tp->rx_std_buffers = NULL;
3923         if (tp->rx_std) {
3924                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3925                                     tp->rx_std, tp->rx_std_mapping);
3926                 tp->rx_std = NULL;
3927         }
3928         if (tp->rx_jumbo) {
3929                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3930                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3931                 tp->rx_jumbo = NULL;
3932         }
3933         if (tp->rx_rcb) {
3934                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3935                                     tp->rx_rcb, tp->rx_rcb_mapping);
3936                 tp->rx_rcb = NULL;
3937         }
3938         if (tp->tx_ring) {
3939                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3940                         tp->tx_ring, tp->tx_desc_mapping);
3941                 tp->tx_ring = NULL;
3942         }
3943         if (tp->hw_status) {
3944                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3945                                     tp->hw_status, tp->status_mapping);
3946                 tp->hw_status = NULL;
3947         }
3948         if (tp->hw_stats) {
3949                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3950                                     tp->hw_stats, tp->stats_mapping);
3951                 tp->hw_stats = NULL;
3952         }
3953 }
3954
3955 /*
3956  * Must not be invoked with interrupt sources disabled and
3957  * the hardware shut down.  Can sleep.
3958  */
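     /* Note that a single kmalloc() below backs the rx_std_buffers,
      * rx_jumbo_buffers and tx_buffers arrays; the latter two pointers are
      * simply carved out of that one allocation, so only rx_std_buffers is
      * ever passed to kfree() (see tg3_free_consistent() above).
      */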
3959 static int tg3_alloc_consistent(struct tg3 *tp)
3960 {
3961         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3962                                       (TG3_RX_RING_SIZE +
3963                                        TG3_RX_JUMBO_RING_SIZE)) +
3964                                      (sizeof(struct tx_ring_info) *
3965                                       TG3_TX_RING_SIZE),
3966                                      GFP_KERNEL);
3967         if (!tp->rx_std_buffers)
3968                 return -ENOMEM;
3969
3970         memset(tp->rx_std_buffers, 0,
3971                (sizeof(struct ring_info) *
3972                 (TG3_RX_RING_SIZE +
3973                  TG3_RX_JUMBO_RING_SIZE)) +
3974                (sizeof(struct tx_ring_info) *
3975                 TG3_TX_RING_SIZE));
3976
3977         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3978         tp->tx_buffers = (struct tx_ring_info *)
3979                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3980
3981         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3982                                           &tp->rx_std_mapping);
3983         if (!tp->rx_std)
3984                 goto err_out;
3985
3986         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3987                                             &tp->rx_jumbo_mapping);
3988
3989         if (!tp->rx_jumbo)
3990                 goto err_out;
3991
3992         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3993                                           &tp->rx_rcb_mapping);
3994         if (!tp->rx_rcb)
3995                 goto err_out;
3996
3997         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3998                                            &tp->tx_desc_mapping);
3999         if (!tp->tx_ring)
4000                 goto err_out;
4001
4002         tp->hw_status = pci_alloc_consistent(tp->pdev,
4003                                              TG3_HW_STATUS_SIZE,
4004                                              &tp->status_mapping);
4005         if (!tp->hw_status)
4006                 goto err_out;
4007
4008         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4009                                             sizeof(struct tg3_hw_stats),
4010                                             &tp->stats_mapping);
4011         if (!tp->hw_stats)
4012                 goto err_out;
4013
4014         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4015         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4016
4017         return 0;
4018
4019 err_out:
4020         tg3_free_consistent(tp);
4021         return -ENOMEM;
4022 }
4023
4024 #define MAX_WAIT_CNT 1000
4025
4026 /* To stop a block, clear the enable bit and poll till it
4027  * clears.  tp->lock is held.
4028  */
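     /* With MAX_WAIT_CNT == 1000 and a 100 usec delay per iteration, this
      * polls for at most roughly 100 msec before printing an error and
      * returning -ENODEV (the silent flag suppresses both).
      */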
4029 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4030 {
4031         unsigned int i;
4032         u32 val;
4033
4034         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4035                 switch (ofs) {
4036                 case RCVLSC_MODE:
4037                 case DMAC_MODE:
4038                 case MBFREE_MODE:
4039                 case BUFMGR_MODE:
4040                 case MEMARB_MODE:
4041                         /* We can't enable/disable these bits of the
4042                          * 5705/5750, just say success.
4043                          */
4044                         return 0;
4045
4046                 default:
4047                         break;
4048                 }
4049         }
4050
4051         val = tr32(ofs);
4052         val &= ~enable_bit;
4053         tw32_f(ofs, val);
4054
4055         for (i = 0; i < MAX_WAIT_CNT; i++) {
4056                 udelay(100);
4057                 val = tr32(ofs);
4058                 if ((val & enable_bit) == 0)
4059                         break;
4060         }
4061
4062         if (i == MAX_WAIT_CNT && !silent) {
4063                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4064                        "ofs=%lx enable_bit=%x\n",
4065                        ofs, enable_bit);
4066                 return -ENODEV;
4067         }
4068
4069         return 0;
4070 }
4071
4072 /* tp->lock is held. */
4073 static int tg3_abort_hw(struct tg3 *tp, int silent)
4074 {
4075         int i, err;
4076
4077         tg3_disable_ints(tp);
4078
4079         tp->rx_mode &= ~RX_MODE_ENABLE;
4080         tw32_f(MAC_RX_MODE, tp->rx_mode);
4081         udelay(10);
4082
4083         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4084         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4085         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4086         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4087         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4088         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4089
4090         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4091         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4092         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4093         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4094         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4095         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4096         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4097
4098         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4099         tw32_f(MAC_MODE, tp->mac_mode);
4100         udelay(40);
4101
4102         tp->tx_mode &= ~TX_MODE_ENABLE;
4103         tw32_f(MAC_TX_MODE, tp->tx_mode);
4104
4105         for (i = 0; i < MAX_WAIT_CNT; i++) {
4106                 udelay(100);
4107                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4108                         break;
4109         }
4110         if (i >= MAX_WAIT_CNT) {
4111                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4112                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4113                        tp->dev->name, tr32(MAC_TX_MODE));
4114                 err |= -ENODEV;
4115         }
4116
4117         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4118         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4119         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4120
4121         tw32(FTQ_RESET, 0xffffffff);
4122         tw32(FTQ_RESET, 0x00000000);
4123
4124         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4125         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4126
4127         if (tp->hw_status)
4128                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4129         if (tp->hw_stats)
4130                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4131
4132         return err;
4133 }
4134
4135 /* tp->lock is held. */
4136 static int tg3_nvram_lock(struct tg3 *tp)
4137 {
4138         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4139                 int i;
4140
4141                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4142                 for (i = 0; i < 8000; i++) {
4143                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4144                                 break;
4145                         udelay(20);
4146                 }
4147                 if (i == 8000)
4148                         return -ENODEV;
4149         }
4150         return 0;
4151 }
4152
4153 /* tp->lock is held. */
4154 static void tg3_nvram_unlock(struct tg3 *tp)
4155 {
4156         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4157                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4158 }
4159
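/* On 5750-class and newer chips (unless the NVRAM is marked protected),
 * host access to the flash interface is gated by the ACCESS_ENABLE bit
 * of the NVRAM_ACCESS register; the two helpers below toggle that bit.
 */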
4160 /* tp->lock is held. */
4161 static void tg3_enable_nvram_access(struct tg3 *tp)
4162 {
4163         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4164             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4165                 u32 nvaccess = tr32(NVRAM_ACCESS);
4166
4167                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4168         }
4169 }
4170
4171 /* tp->lock is held. */
4172 static void tg3_disable_nvram_access(struct tg3 *tp)
4173 {
4174         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4175             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4176                 u32 nvaccess = tr32(NVRAM_ACCESS);
4177
4178                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4179         }
4180 }
4181
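/* The next three helpers report driver state transitions (start, unload,
 * suspend) to the firmware through the driver-state mailbox in NIC SRAM.
 * The pre/post variants serve firmware that uses the newer ASF handshake,
 * while tg3_write_sig_legacy() covers older ASF firmware.
 */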
4182 /* tp->lock is held. */
4183 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4184 {
4185         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4186                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4187                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4188
4189         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4190                 switch (kind) {
4191                 case RESET_KIND_INIT:
4192                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4193                                       DRV_STATE_START);
4194                         break;
4195
4196                 case RESET_KIND_SHUTDOWN:
4197                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4198                                       DRV_STATE_UNLOAD);
4199                         break;
4200
4201                 case RESET_KIND_SUSPEND:
4202                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4203                                       DRV_STATE_SUSPEND);
4204                         break;
4205
4206                 default:
4207                         break;
4208                 }
4209         }
4210 }
4211
4212 /* tp->lock is held. */
4213 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4214 {
4215         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4216                 switch (kind) {
4217                 case RESET_KIND_INIT:
4218                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4219                                       DRV_STATE_START_DONE);
4220                         break;
4221
4222                 case RESET_KIND_SHUTDOWN:
4223                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4224                                       DRV_STATE_UNLOAD_DONE);
4225                         break;
4226
4227                 default:
4228                         break;
4229                 }
4230         }
4231 }
4232
4233 /* tp->lock is held. */
4234 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4235 {
4236         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4237                 switch (kind) {
4238                 case RESET_KIND_INIT:
4239                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4240                                       DRV_STATE_START);
4241                         break;
4242
4243                 case RESET_KIND_SHUTDOWN:
4244                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4245                                       DRV_STATE_UNLOAD);
4246                         break;
4247
4248                 case RESET_KIND_SUSPEND:
4249                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4250                                       DRV_STATE_SUSPEND);
4251                         break;
4252
4253                 default:
4254                         break;
4255                 }
4256         }
4257 }
4258
4259 static void tg3_stop_fw(struct tg3 *);
4260
4261 /* tp->lock is held. */
4262 static int tg3_chip_reset(struct tg3 *tp)
4263 {
4264         u32 val;
4265         void (*write_op)(struct tg3 *, u32, u32);
4266         int i;
4267
4268         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4269                 tg3_nvram_lock(tp);
4270
4271         /*
4272          * We must avoid the readl() that normally takes place.
4273          * It locks up machines, causes machine checks, and does
4274          * other fun things.  So, temporarily disable the 5701
4275          * hardware workaround while we do the reset.
4276          */
4277         write_op = tp->write32;
4278         if (write_op == tg3_write_flush_reg32)
4279                 tp->write32 = tg3_write32;
4280
4281         /* do the reset */
4282         val = GRC_MISC_CFG_CORECLK_RESET;
4283
4284         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4285                 if (tr32(0x7e2c) == 0x60) {
4286                         tw32(0x7e2c, 0x20);
4287                 }
4288                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4289                         tw32(GRC_MISC_CFG, (1 << 29));
4290                         val |= (1 << 29);
4291                 }
4292         }
4293
4294         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4295                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4296         tw32(GRC_MISC_CFG, val);
4297
4298         /* restore 5701 hardware bug workaround write method */
4299         tp->write32 = write_op;
4300
4301         /* Unfortunately, we have to delay before the PCI read back.
4302          * Some 575X chips will not even respond to a PCI cfg access
4303          * when the reset command is given to the chip.
4304          *
4305          * How do these hardware designers expect things to work
4306          * properly if the PCI write is posted for a long period
4307          * of time?  It is always necessary to have some method by
4308          * which a register read back can occur to push the write
4309          * out which does the reset.
4310          *
4311          * For most tg3 variants the trick below has worked.
4312          * Ho hum...
4313          */
4314         udelay(120);
4315
4316         /* Flush PCI posted writes.  The normal MMIO registers
4317          * are inaccessible at this time so this is the only
4318          * way to do this reliably (actually, this is no longer
4319          * the case, see above).  I tried to use indirect
4320          * register read/write but this upset some 5701 variants.
4321          */
4322         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4323
4324         udelay(120);
4325
4326         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4327                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4328                         int i;
4329                         u32 cfg_val;
4330
4331                         /* Wait for link training to complete.  */
4332                         for (i = 0; i < 5000; i++)
4333                                 udelay(100);
4334
4335                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4336                         pci_write_config_dword(tp->pdev, 0xc4,
4337                                                cfg_val | (1 << 15));
4338                 }
4339                 /* Set PCIE max payload size and clear error status.  */
4340                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4341         }
4342
4343         /* Re-enable indirect register accesses. */
4344         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4345                                tp->misc_host_ctrl);
4346
4347         /* Set MAX PCI retry to zero. */
4348         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4349         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4350             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4351                 val |= PCISTATE_RETRY_SAME_DMA;
4352         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4353
4354         pci_restore_state(tp->pdev);
4355
4356         /* Make sure PCI-X relaxed ordering bit is clear. */
4357         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4358         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4359         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4360
4361         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4362                 u32 val;
4363
4364                 /* Chip reset on 5780 will reset MSI enable bit,
4365                  * so we need to restore it.
4366                  */
4367                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4368                         u16 ctrl;
4369
4370                         pci_read_config_word(tp->pdev,
4371                                              tp->msi_cap + PCI_MSI_FLAGS,
4372                                              &ctrl);
4373                         pci_write_config_word(tp->pdev,
4374                                               tp->msi_cap + PCI_MSI_FLAGS,
4375                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4376                         val = tr32(MSGINT_MODE);
4377                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4378                 }
4379
4380                 val = tr32(MEMARB_MODE);
4381                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4382
4383         } else
4384                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4385
4386         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4387                 tg3_stop_fw(tp);
4388                 tw32(0x5000, 0x400);
4389         }
4390
4391         tw32(GRC_MODE, tp->grc_mode);
4392
4393         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4394                 u32 val = tr32(0xc4);
4395
4396                 tw32(0xc4, val | (1 << 15));
4397         }
4398
4399         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4400             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4401                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4402                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4403                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4404                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4405         }
4406
4407         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4408                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4409                 tw32_f(MAC_MODE, tp->mac_mode);
4410         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4411                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4412                 tw32_f(MAC_MODE, tp->mac_mode);
4413         } else
4414                 tw32_f(MAC_MODE, 0);
4415         udelay(40);
4416
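        /* tg3_write_sig_pre_reset() left MAGIC1 in the firmware mailbox
         * before the reset; the bootcode writes back the one's complement
         * of that value when it has finished, so poll for it here
         * (100000 iterations at 10 usec gives a 1 second limit).
         */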
4417         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4418                 /* Wait for firmware initialization to complete. */
4419                 for (i = 0; i < 100000; i++) {
4420                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4421                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4422                                 break;
4423                         udelay(10);
4424                 }
4425                 if (i >= 100000) {
4426                         printk(KERN_ERR PFX "tg3_chip_reset timed out for %s: "
4427                                "firmware will not restart, magic=%08x\n",
4428                                tp->dev->name, val);
4429                         return -ENODEV;
4430                 }
4431         }
4432
4433         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4434             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4435                 u32 val = tr32(0x7c00);
4436
4437                 tw32(0x7c00, val | (1 << 25));
4438         }
4439
4440         /* Reprobe ASF enable state.  */
4441         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4442         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4443         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4444         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4445                 u32 nic_cfg;
4446
4447                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4448                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4449                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4450                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4451                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4452                 }
4453         }
4454
4455         return 0;
4456 }
4457
4458 /* tp->lock is held. */
4459 static void tg3_stop_fw(struct tg3 *tp)
4460 {
4461         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4462                 u32 val;
4463                 int i;
4464
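                /* Post the pause command in the firmware mailbox, raise
                 * a driver event to the RX CPU, and give the firmware
                 * up to 100 usec to acknowledge by clearing the event.
                 */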
4465                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4466                 val = tr32(GRC_RX_CPU_EVENT);
4467                 val |= (1 << 14);
4468                 tw32(GRC_RX_CPU_EVENT, val);
4469
4470                 /* Wait for RX cpu to ACK the event.  */
4471                 for (i = 0; i < 100; i++) {
4472                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4473                                 break;
4474                         udelay(1);
4475                 }
4476         }
4477 }
4478
4479 /* tp->lock is held. */
4480 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4481 {
4482         int err;
4483
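        /* Pause the firmware and announce the impending reset, quiesce
         * the DMA engines, reset the chip, then tell the firmware that
         * the reset has completed.
         */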
4484         tg3_stop_fw(tp);
4485
4486         tg3_write_sig_pre_reset(tp, kind);
4487
4488         tg3_abort_hw(tp, silent);
4489         err = tg3_chip_reset(tp);
4490
4491         tg3_write_sig_legacy(tp, kind);
4492         tg3_write_sig_post_reset(tp, kind);
4493
4494         if (err)
4495                 return err;
4496
4497         return 0;
4498 }
4499
4500 #define TG3_FW_RELEASE_MAJOR    0x0
4501 #define TG3_FW_RELEASE_MINOR    0x0
4502 #define TG3_FW_RELEASE_FIX      0x0
4503 #define TG3_FW_START_ADDR       0x08000000
4504 #define TG3_FW_TEXT_ADDR        0x08000000
4505 #define TG3_FW_TEXT_LEN         0x9c0
4506 #define TG3_FW_RODATA_ADDR      0x080009c0
4507 #define TG3_FW_RODATA_LEN       0x60
4508 #define TG3_FW_DATA_ADDR        0x08000a40
4509 #define TG3_FW_DATA_LEN         0x20
4510 #define TG3_FW_SBSS_ADDR        0x08000a60
4511 #define TG3_FW_SBSS_LEN         0xc
4512 #define TG3_FW_BSS_ADDR         0x08000a70
4513 #define TG3_FW_BSS_LEN          0x10
4514
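/* Firmware text segment for the 5701 A0 workaround, stored as raw 32-bit
 * words.  tg3_load_5701_a0_firmware_fix() below copies it (together with
 * the rodata segment) into the RX and TX CPU scratch areas laid out by
 * the TG3_FW_* constants above, then restarts only the RX CPU.
 */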
4515 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4516         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4517         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4518         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4519         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4520         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4521         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4522         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4523         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4524         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4525         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4526         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4527         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4528         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4529         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4530         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4531         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4532         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4533         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4534         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4535         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4536         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4537         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4538         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4539         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4540         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4541         0, 0, 0, 0, 0, 0,
4542         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4543         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4544         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4545         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4546         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4547         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4548         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4549         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4550         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4551         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4552         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4553         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4554         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4555         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4556         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4557         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4558         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4559         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4560         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4561         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4562         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4563         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4564         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4565         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4566         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4567         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4568         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4569         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4570         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4571         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4572         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4573         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4574         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4575         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4576         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4577         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4578         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4579         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4580         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4581         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4582         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4583         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4584         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4585         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4586         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4587         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4588         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4589         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4590         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4591         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4592         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4593         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4594         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4595         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4596         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4597         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4598         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4599         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4600         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4601         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4602         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4603         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4604         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4605         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4606         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4607 };
4608
4609 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4610         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4611         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4612         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4613         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4614         0x00000000
4615 };
4616
4617 #if 0 /* All zeros, don't eat up space with it. */
4618 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4619         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4620         0x00000000, 0x00000000, 0x00000000, 0x00000000
4621 };
4622 #endif
4623
4624 #define RX_CPU_SCRATCH_BASE     0x30000
4625 #define RX_CPU_SCRATCH_SIZE     0x04000
4626 #define TX_CPU_SCRATCH_BASE     0x34000
4627 #define TX_CPU_SCRATCH_SIZE     0x04000
4628
4629 /* tp->lock is held. */
4630 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4631 {
4632         int i;
4633
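        /* 5705 and newer chips have no separate TX CPU to halt, so
         * reaching here with TX_CPU_BASE is a driver bug.
         */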
4634         if (offset == TX_CPU_BASE &&
4635             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4636                 BUG();
4637
4638         if (offset == RX_CPU_BASE) {
4639                 for (i = 0; i < 10000; i++) {
4640                         tw32(offset + CPU_STATE, 0xffffffff);
4641                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4642                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4643                                 break;
4644                 }
4645
4646                 tw32(offset + CPU_STATE, 0xffffffff);
4647                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4648                 udelay(10);
4649         } else {
4650                 for (i = 0; i < 10000; i++) {
4651                         tw32(offset + CPU_STATE, 0xffffffff);
4652                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4653                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4654                                 break;
4655                 }
4656         }
4657
4658         if (i >= 10000) {
4659                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4660                        "%s CPU\n",
4661                        tp->dev->name,
4662                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4663                 return -ENODEV;
4664         }
4665         return 0;
4666 }
4667
4668 struct fw_info {
4669         unsigned int text_base;
4670         unsigned int text_len;
4671         u32 *text_data;
4672         unsigned int rodata_base;
4673         unsigned int rodata_len;
4674         u32 *rodata_data;
4675         unsigned int data_base;
4676         unsigned int data_len;
4677         u32 *data_data;
4678 };
4679
4680 /* tp->lock is held. */
4681 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4682                                  int cpu_scratch_size, struct fw_info *info)
4683 {
4684         int err, i;
4685         void (*write_op)(struct tg3 *, u32, u32);
4686
4687         if (cpu_base == TX_CPU_BASE &&
4688             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4689                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4690                        "TX cpu firmware on %s which is 5705 or later.\n",
4691                        tp->dev->name);
4692                 return -EINVAL;
4693         }
4694
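        /* On 5705 and later parts the CPU scratch memory is written
         * through the NIC SRAM memory window; older parts take
         * indirect register writes.
         */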
4695         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4696                 write_op = tg3_write_mem;
4697         else
4698                 write_op = tg3_write_indirect_reg32;
4699
4700         /* It is possible that bootcode is still loading at this point.
4701          * Get the nvram lock before halting the cpu.
4702          */
4703         tg3_nvram_lock(tp);
4704         err = tg3_halt_cpu(tp, cpu_base);
4705         tg3_nvram_unlock(tp);
4706         if (err)
4707                 goto out;
4708
4709         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4710                 write_op(tp, cpu_scratch_base + i, 0);
4711         tw32(cpu_base + CPU_STATE, 0xffffffff);
4712         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4713         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4714                 write_op(tp, (cpu_scratch_base +
4715                               (info->text_base & 0xffff) +
4716                               (i * sizeof(u32))),
4717                          (info->text_data ?
4718                           info->text_data[i] : 0));
4719         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4720                 write_op(tp, (cpu_scratch_base +
4721                               (info->rodata_base & 0xffff) +
4722                               (i * sizeof(u32))),
4723                          (info->rodata_data ?
4724                           info->rodata_data[i] : 0));
4725         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4726                 write_op(tp, (cpu_scratch_base +
4727                               (info->data_base & 0xffff) +
4728                               (i * sizeof(u32))),
4729                          (info->data_data ?
4730                           info->data_data[i] : 0));
4731
4732         err = 0;
4733
4734 out:
4735         return err;
4736 }
4737
4738 /* tp->lock is held. */
4739 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4740 {
4741         struct fw_info info;
4742         int err, i;
4743
4744         info.text_base = TG3_FW_TEXT_ADDR;
4745         info.text_len = TG3_FW_TEXT_LEN;
4746         info.text_data = &tg3FwText[0];
4747         info.rodata_base = TG3_FW_RODATA_ADDR;
4748         info.rodata_len = TG3_FW_RODATA_LEN;
4749         info.rodata_data = &tg3FwRodata[0];
4750         info.data_base = TG3_FW_DATA_ADDR;
4751         info.data_len = TG3_FW_DATA_LEN;
4752         info.data_data = NULL;
4753
4754         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4755                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4756                                     &info);
4757         if (err)
4758                 return err;
4759
4760         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4761                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4762                                     &info);
4763         if (err)
4764                 return err;
4765
4766         /* Now startup only the RX cpu. */
4767         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4768         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4769
4770         for (i = 0; i < 5; i++) {
4771                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4772                         break;
4773                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4774                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4775                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4776                 udelay(1000);
4777         }
4778         if (i >= 5) {
4779                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s: "
4780                        "RX CPU PC is %08x, should be %08x\n",
4781                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4782                        TG3_FW_TEXT_ADDR);
4783                 return -ENODEV;
4784         }
4785         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4786         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4787
4788         return 0;
4789 }
4790
4791 #if TG3_TSO_SUPPORT != 0
4792
4793 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4794 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4795 #define TG3_TSO_FW_RELEASE_FIX          0x0
4796 #define TG3_TSO_FW_START_ADDR           0x08000000
4797 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4798 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4799 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4800 #define TG3_TSO_FW_RODATA_LEN           0x60
4801 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4802 #define TG3_TSO_FW_DATA_LEN             0x30
4803 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4804 #define TG3_TSO_FW_SBSS_LEN             0x2c
4805 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4806 #define TG3_TSO_FW_BSS_LEN              0x894
4807
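/* TSO offload firmware image (text segment as raw 32-bit words), laid
 * out according to the TG3_TSO_FW_* constants above; the rodata and
 * data segments follow as separate arrays.
 */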
4808 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4809         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4810         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4811         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4812         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4813         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4814         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4815         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4816         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4817         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4818         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4819         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4820         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4821         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4822         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4823         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4824         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4825         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4826         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4827         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4828         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4829         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4830         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4831         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4832         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4833         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4834         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4835         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4836         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4837         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4838         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4839         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4840         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4841         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4842         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4843         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4844         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4845         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4846         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4847         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4848         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4849         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4850         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4851         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4852         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4853         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4854         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4855         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4856         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4857         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4858         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4859         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4860         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4861         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4862         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4863         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4864         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4865         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4866         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4867         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4868         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4869         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4870         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4871         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4872         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4873         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4874         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4875         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4876         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4877         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4878         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4879         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4880         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4881         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4882         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4883         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4884         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4885         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4886         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4887         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4888         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4889         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4890         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4891         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4892         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4893         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4894         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4895         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4896         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4897         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4898         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4899         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4900         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4901         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4902         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4903         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4904         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4905         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4906         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4907         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4908         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4909         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4910         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4911         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4912         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4913         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4914         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4915         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4916         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4917         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4918         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4919         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4920         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4921         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4922         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4923         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4924         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4925         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4926         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4927         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4928         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4929         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4930         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4931         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4932         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4933         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4934         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4935         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4936         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4937         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4938         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4939         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4940         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4941         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4942         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4943         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4944         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4945         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4946         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4947         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4948         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4949         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4950         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4951         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4952         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4953         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4954         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4955         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4956         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4957         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4958         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4959         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4960         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4961         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4962         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4963         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4964         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4965         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4966         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4967         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4968         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4969         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4970         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4971         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4972         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4973         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4974         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4975         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4976         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4977         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4978         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4979         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4980         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4981         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4982         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4983         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4984         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4985         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4986         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4987         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4988         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4989         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4990         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4991         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4992         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4993         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4994         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4995         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4996         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4997         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4998         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4999         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5000         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5001         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5002         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5003         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5004         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5005         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5006         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5007         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5008         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5009         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5010         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5011         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5012         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5013         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5014         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5015         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5016         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5017         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5018         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5019         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5020         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5021         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5022         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5023         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5024         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5025         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5026         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5027         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5028         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5029         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5030         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5031         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5032         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5033         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5034         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5035         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5036         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5037         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5038         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5039         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5040         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5041         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5042         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5043         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5044         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5045         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5046         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5047         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5048         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5049         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5050         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5051         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5052         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5053         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5054         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5055         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5056         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5057         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5058         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5059         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5060         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5061         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5062         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5063         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5064         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5065         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5066         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5067         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5068         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5069         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5070         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5071         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5072         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5073         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5074         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5075         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5076         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5077         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5078         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5079         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5080         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5081         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5082         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5083         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5084         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5085         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5086         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5087         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5088         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5089         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5090         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5091         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5092         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5093 };
5094
5095 static u32 tg3TsoFwRodata[] = {
5096         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5097         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5098         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5099         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5100         0x00000000,
5101 };
5102
5103 static u32 tg3TsoFwData[] = {
5104         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5105         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5106         0x00000000,
5107 };
5108
5109 /* 5705 needs a special version of the TSO firmware.  */
5110 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5111 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5112 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5113 #define TG3_TSO5_FW_START_ADDR          0x00010000
5114 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5115 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5116 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5117 #define TG3_TSO5_FW_RODATA_LEN          0x50
5118 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5119 #define TG3_TSO5_FW_DATA_LEN            0x20
5120 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5121 #define TG3_TSO5_FW_SBSS_LEN            0x28
5122 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5123 #define TG3_TSO5_FW_BSS_LEN             0x88
5124
5125 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5126         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5127         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5128         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5129         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5130         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5131         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5132         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5133         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5134         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5135         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5136         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5137         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5138         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5139         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5140         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5141         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5142         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5143         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5144         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5145         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5146         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5147         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5148         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5149         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5150         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5151         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5152         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5153         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5154         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5155         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5156         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5157         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5158         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5159         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5160         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5161         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5162         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5163         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5164         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5165         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5166         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5167         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5168         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5169         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5170         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5171         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5172         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5173         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5174         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5175         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5176         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5177         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5178         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5179         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5180         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5181         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5182         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5183         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5184         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5185         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5186         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5187         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5188         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5189         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5190         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5191         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5192         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5193         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5194         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5195         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5196         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5197         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5198         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5199         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5200         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5201         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5202         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5203         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5204         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5205         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5206         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5207         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5208         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5209         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5210         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5211         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5212         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5213         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5214         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5215         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5216         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5217         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5218         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5219         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5220         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5221         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5222         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5223         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5224         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5225         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5226         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5227         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5228         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5229         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5230         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5231         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5232         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5233         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5234         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5235         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5236         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5237         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5238         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5239         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5240         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5241         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5242         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5243         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5244         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5245         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5246         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5247         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5248         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5249         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5250         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5251         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5252         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5253         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5254         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5255         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5256         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5257         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5258         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5259         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5260         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5261         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5262         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5263         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5264         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5265         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5266         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5267         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5268         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5269         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5270         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5271         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5272         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5273         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5274         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5275         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5276         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5277         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5278         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5279         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5280         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5281         0x00000000, 0x00000000, 0x00000000,
5282 };
5283
5284 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5285         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5286         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5287         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5288         0x00000000, 0x00000000, 0x00000000,
5289 };
5290
5291 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5292         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5293         0x00000000, 0x00000000, 0x00000000,
5294 };
5295
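/* The tg3Tso*Fw* arrays above hold the TSO firmware image (MIPS text,
 * rodata and data sections) that tg3_load_tso_firmware() copies into the
 * NIC's internal CPU scratch memory.  The *_LEN constants are byte counts,
 * which is why the tg3Tso5Fw* arrays are dimensioned as (LEN / 4) + 1 words.
 */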
5296 /* tp->lock is held. */
5297 static int tg3_load_tso_firmware(struct tg3 *tp)
5298 {
5299         struct fw_info info;
5300         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5301         int err, i;
5302
5303         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5304                 return 0;
5305
5306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5307                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5308                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5309                 info.text_data = &tg3Tso5FwText[0];
5310                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5311                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5312                 info.rodata_data = &tg3Tso5FwRodata[0];
5313                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5314                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5315                 info.data_data = &tg3Tso5FwData[0];
5316                 cpu_base = RX_CPU_BASE;
5317                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5318                 cpu_scratch_size = (info.text_len +
5319                                     info.rodata_len +
5320                                     info.data_len +
5321                                     TG3_TSO5_FW_SBSS_LEN +
5322                                     TG3_TSO5_FW_BSS_LEN);
5323         } else {
5324                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5325                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5326                 info.text_data = &tg3TsoFwText[0];
5327                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5328                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5329                 info.rodata_data = &tg3TsoFwRodata[0];
5330                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5331                 info.data_len = TG3_TSO_FW_DATA_LEN;
5332                 info.data_data = &tg3TsoFwData[0];
5333                 cpu_base = TX_CPU_BASE;
5334                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5335                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5336         }
5337
5338         err = tg3_load_firmware_cpu(tp, cpu_base,
5339                                     cpu_scratch_base, cpu_scratch_size,
5340                                     &info);
5341         if (err)
5342                 return err;
5343
5344         /* Now startup the cpu. */
5345         tw32(cpu_base + CPU_STATE, 0xffffffff);
5346         tw32_f(cpu_base + CPU_PC,    info.text_base);
5347
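        /* Verify that the PC write actually took effect; if not, halt the
         * CPU, rewrite the PC and check again, giving up after five
         * attempts spaced 1 ms apart.
         */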
5348         for (i = 0; i < 5; i++) {
5349                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5350                         break;
5351                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5352                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5353                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5354                 udelay(1000);
5355         }
5356         if (i >= 5) {
5357                 printk(KERN_ERR PFX "tg3_load_tso_firmware: %s failed to "
5358                        "set CPU PC, is %08x, should be %08x\n",
5359                        tp->dev->name, tr32(cpu_base + CPU_PC),
5360                        info.text_base);
5361                 return -ENODEV;
5362         }
5363         tw32(cpu_base + CPU_STATE, 0xffffffff);
5364         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5365         return 0;
5366 }
5367
5368 #endif /* TG3_TSO_SUPPORT != 0 */
5369
5370 /* tp->lock is held. */
5371 static void __tg3_set_mac_addr(struct tg3 *tp)
5372 {
5373         u32 addr_high, addr_low;
5374         int i;
5375
5376         addr_high = ((tp->dev->dev_addr[0] << 8) |
5377                      tp->dev->dev_addr[1]);
5378         addr_low = ((tp->dev->dev_addr[2] << 24) |
5379                     (tp->dev->dev_addr[3] << 16) |
5380                     (tp->dev->dev_addr[4] <<  8) |
5381                     (tp->dev->dev_addr[5] <<  0));
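        /* The station address is split across two registers: addr_high
         * carries bytes 0-1 in its low 16 bits, addr_low carries bytes 2-5.
         * For a hypothetical address 00:10:18:aa:bb:cc this would give
         * addr_high = 0x00000010 and addr_low = 0x18aabbcc.  The same
         * address is mirrored into all four MAC address slots below.
         */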
5382         for (i = 0; i < 4; i++) {
5383                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5384                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5385         }
5386
5387         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5389                 for (i = 0; i < 12; i++) {
5390                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5391                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5392                 }
5393         }
5394
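        /* Seed the transmit backoff generator from the byte sum of the
         * station address, which makes it unlikely that NICs with different
         * addresses pick the same random backoff slots.
         */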
5395         addr_high = (tp->dev->dev_addr[0] +
5396                      tp->dev->dev_addr[1] +
5397                      tp->dev->dev_addr[2] +
5398                      tp->dev->dev_addr[3] +
5399                      tp->dev->dev_addr[4] +
5400                      tp->dev->dev_addr[5]) &
5401                 TX_BACKOFF_SEED_MASK;
5402         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5403 }
5404
5405 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5406 {
5407         struct tg3 *tp = netdev_priv(dev);
5408         struct sockaddr *addr = p;
5409
5410         if (!is_valid_ether_addr(addr->sa_data))
5411                 return -EINVAL;
5412
5413         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5414
5415         spin_lock_bh(&tp->lock);
5416         __tg3_set_mac_addr(tp);
5417         spin_unlock_bh(&tp->lock);
5418
5419         return 0;
5420 }
5421
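/* Program one TG3_BDINFO control block in NIC SRAM: the 64-bit host DMA
 * address of the ring, its (max length << 16) | flags word and, on
 * pre-5705 chips only, the ring's location in NIC memory.
 */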
5422 /* tp->lock is held. */
5423 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5424                            dma_addr_t mapping, u32 maxlen_flags,
5425                            u32 nic_addr)
5426 {
5427         tg3_write_mem(tp,
5428                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5429                       ((u64) mapping >> 32));
5430         tg3_write_mem(tp,
5431                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5432                       ((u64) mapping & 0xffffffff));
5433         tg3_write_mem(tp,
5434                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5435                        maxlen_flags);
5436
5437         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5438                 tg3_write_mem(tp,
5439                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5440                               nic_addr);
5441 }
5442
5443 static void __tg3_set_rx_mode(struct net_device *);
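/* Push the ethtool coalescing parameters into the host coalescing engine.
 * The per-IRQ tick registers and the statistics-block tick are not written
 * on 5705-and-later chips; on the older chips the statistics tick is forced
 * to zero while the link is down.
 */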
5444 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5445 {
5446         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5447         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5448         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5449         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5450         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5451                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5452                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5453         }
5454         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5455         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5456         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5457                 u32 val = ec->stats_block_coalesce_usecs;
5458
5459                 if (!netif_carrier_ok(tp->dev))
5460                         val = 0;
5461
5462                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5463         }
5464 }
5465
5466 /* tp->lock is held. */
5467 static int tg3_reset_hw(struct tg3 *tp)
5468 {
5469         u32 val, rdmac_mode;
5470         int i, err, limit;
5471
5472         tg3_disable_ints(tp);
5473
5474         tg3_stop_fw(tp);
5475
5476         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5477
5478         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5479                 tg3_abort_hw(tp, 1);
5480         }
5481
5482         err = tg3_chip_reset(tp);
5483         if (err)
5484                 return err;
5485
5486         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5487
5488         /* This works around an issue with Athlon chipsets on
5489          * B3 tigon3 silicon.  This bit has no effect on any
5490          * other revision.  But do not set this on PCI Express
5491          * chips.
5492          */
5493         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5494                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5495         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5496
5497         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5498             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5499                 val = tr32(TG3PCI_PCISTATE);
5500                 val |= PCISTATE_RETRY_SAME_DMA;
5501                 tw32(TG3PCI_PCISTATE, val);
5502         }
5503
5504         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5505                 /* Enable some hw fixes.  */
5506                 val = tr32(TG3PCI_MSI_DATA);
5507                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5508                 tw32(TG3PCI_MSI_DATA, val);
5509         }
5510
5511         /* Descriptor ring init may access the
5512          * NIC SRAM area to set up the TX descriptors, so we
5513          * can only do this after the hardware has been
5514          * successfully reset.
5515          */
5516         tg3_init_rings(tp);
5517
5518         /* This value is determined during the probe time DMA
5519          * engine test, tg3_test_dma.
5520          */
5521         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5522
5523         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5524                           GRC_MODE_4X_NIC_SEND_RINGS |
5525                           GRC_MODE_NO_TX_PHDR_CSUM |
5526                           GRC_MODE_NO_RX_PHDR_CSUM);
5527         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5528         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5529                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5530         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5531                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5532
5533         tw32(GRC_MODE,
5534              tp->grc_mode |
5535              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5536
5537         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
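        /* A prescaler value of 65 divides the 66 MHz clock by 66, which
         * should yield the 1 usec tick the coalescing timers are specified
         * in (divide-by-N+1 convention assumed).
         */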
5538         val = tr32(GRC_MISC_CFG);
5539         val &= ~0xff;
5540         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5541         tw32(GRC_MISC_CFG, val);
5542
5543         /* Initialize MBUF/DESC pool. */
5544         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5545                 /* Do nothing.  */
5546         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5547                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5548                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5549                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5550                 else
5551                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5552                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5553                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5554         }
5555 #if TG3_TSO_SUPPORT != 0
5556         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5557                 int fw_len;
5558
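                /* The 5705 TSO firmware is loaded at the bottom of the MBUF
                 * pool SRAM, so round its footprint up to a 128-byte
                 * boundary and shrink the pool by that amount plus an extra
                 * 0xa00 bytes.
                 */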
5559                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5560                           TG3_TSO5_FW_RODATA_LEN +
5561                           TG3_TSO5_FW_DATA_LEN +
5562                           TG3_TSO5_FW_SBSS_LEN +
5563                           TG3_TSO5_FW_BSS_LEN);
5564                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5565                 tw32(BUFMGR_MB_POOL_ADDR,
5566                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5567                 tw32(BUFMGR_MB_POOL_SIZE,
5568                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5569         }
5570 #endif
5571
5572         if (tp->dev->mtu <= ETH_DATA_LEN) {
5573                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5574                      tp->bufmgr_config.mbuf_read_dma_low_water);
5575                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5576                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5577                 tw32(BUFMGR_MB_HIGH_WATER,
5578                      tp->bufmgr_config.mbuf_high_water);
5579         } else {
5580                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5581                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5582                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5583                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5584                 tw32(BUFMGR_MB_HIGH_WATER,
5585                      tp->bufmgr_config.mbuf_high_water_jumbo);
5586         }
5587         tw32(BUFMGR_DMA_LOW_WATER,
5588              tp->bufmgr_config.dma_low_water);
5589         tw32(BUFMGR_DMA_HIGH_WATER,
5590              tp->bufmgr_config.dma_high_water);
5591
5592         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5593         for (i = 0; i < 2000; i++) {
5594                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5595                         break;
5596                 udelay(10);
5597         }
5598         if (i >= 2000) {
5599                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5600                        tp->dev->name);
5601                 return -ENODEV;
5602         }
5603
5604         /* Setup replenish threshold. */
5605         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5606
5607         /* Initialize TG3_BDINFO's at:
5608          *  RCVDBDI_STD_BD:     standard eth size rx ring
5609          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5610          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5611          *
5612          * like so:
5613          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5614          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5615          *                              ring attribute flags
5616          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5617          *
5618          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5619          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5620          *
5621          * The size of each ring is fixed in the firmware, but the location is
5622          * configurable.
5623          */
5624         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5625              ((u64) tp->rx_std_mapping >> 32));
5626         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5627              ((u64) tp->rx_std_mapping & 0xffffffff));
5628         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5629              NIC_SRAM_RX_BUFFER_DESC);
5630
5631         /* Don't even try to program the JUMBO/MINI buffer descriptor
5632          * configs on 5705.
5633          */
5634         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5635                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5636                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5637         } else {
5638                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5639                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5640
5641                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5642                      BDINFO_FLAGS_DISABLED);
5643
5644                 /* Setup replenish threshold. */
5645                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5646
5647                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5648                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5649                              ((u64) tp->rx_jumbo_mapping >> 32));
5650                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5651                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5652                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5653                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5654                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5655                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5656                 } else {
5657                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5658                              BDINFO_FLAGS_DISABLED);
5659                 }
5660
5661         }
5662
5663         /* There is only one send ring on 5705/5750, no need to explicitly
5664          * disable the others.
5665          */
5666         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5667                 /* Clear out send RCB ring in SRAM. */
5668                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5669                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5670                                       BDINFO_FLAGS_DISABLED);
5671         }
5672
5673         tp->tx_prod = 0;
5674         tp->tx_cons = 0;
5675         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5676         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5677
5678         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5679                        tp->tx_desc_mapping,
5680                        (TG3_TX_RING_SIZE <<
5681                         BDINFO_FLAGS_MAXLEN_SHIFT),
5682                        NIC_SRAM_TX_BUFFER_DESC);
5683
5684         /* There is only one receive return ring on 5705/5750, no need
5685          * to explicitly disable the others.
5686          */
5687         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5688                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5689                      i += TG3_BDINFO_SIZE) {
5690                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5691                                       BDINFO_FLAGS_DISABLED);
5692                 }
5693         }
5694
5695         tp->rx_rcb_ptr = 0;
5696         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5697
5698         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5699                        tp->rx_rcb_mapping,
5700                        (TG3_RX_RCB_RING_SIZE(tp) <<
5701                         BDINFO_FLAGS_MAXLEN_SHIFT),
5702                        0);
5703
5704         tp->rx_std_ptr = tp->rx_pending;
5705         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5706                      tp->rx_std_ptr);
5707
5708         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5709                                                 tp->rx_jumbo_pending : 0;
5710         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5711                      tp->rx_jumbo_ptr);
5712
5713         /* Initialize MAC address and backoff seed. */
5714         __tg3_set_mac_addr(tp);
5715
5716         /* MTU + ethernet header + FCS + optional VLAN tag */
5717         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
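        /* ETH_HLEN covers the 14-byte header; the extra 8 bytes allow for
         * the 4-byte FCS plus a 4-byte 802.1Q tag.
         */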
5718
5719         /* The slot time is changed by tg3_setup_phy if we
5720          * run at gigabit with half duplex.
5721          */
5722         tw32(MAC_TX_LENGTHS,
5723              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5724              (6 << TX_LENGTHS_IPG_SHIFT) |
5725              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5726
5727         /* Receive rules. */
5728         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5729         tw32(RCVLPC_CONFIG, 0x0181);
5730
5731         /* Calculate RDMAC_MODE setting early, we need it to determine
5732          * the RCVLPC_STATE_ENABLE mask.
5733          */
5734         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5735                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5736                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5737                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5738                       RDMAC_MODE_LNGREAD_ENAB);
5739         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5740                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5741
5742         /* If statement applies to 5705 and 5750 PCI devices only */
5743         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5744              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5745             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5746                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5747                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5748                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5749                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5750                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5751                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5752                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5753                 }
5754         }
5755
5756         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5757                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5758
5759 #if TG3_TSO_SUPPORT != 0
5760         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5761                 rdmac_mode |= (1 << 27);
5762 #endif
5763
5764         /* Receive/send statistics. */
5765         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5766             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5767                 val = tr32(RCVLPC_STATS_ENABLE);
5768                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5769                 tw32(RCVLPC_STATS_ENABLE, val);
5770         } else {
5771                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5772         }
5773         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5774         tw32(SNDDATAI_STATSENAB, 0xffffff);
5775         tw32(SNDDATAI_STATSCTRL,
5776              (SNDDATAI_SCTRL_ENABLE |
5777               SNDDATAI_SCTRL_FASTUPD));
5778
5779         /* Setup host coalescing engine. */
5780         tw32(HOSTCC_MODE, 0);
5781         for (i = 0; i < 2000; i++) {
5782                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5783                         break;
5784                 udelay(10);
5785         }
5786
5787         __tg3_set_coalesce(tp, &tp->coal);
5788
5789         /* set status block DMA address */
5790         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5791              ((u64) tp->status_mapping >> 32));
5792         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5793              ((u64) tp->status_mapping & 0xffffffff));
5794
5795         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5796                 /* Status/statistics block address.  See tg3_timer,
5797                  * the tg3_periodic_fetch_stats call there, and
5798                  * tg3_get_stats to see how this works for 5705/5750 chips.
5799                  */
5800                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5801                      ((u64) tp->stats_mapping >> 32));
5802                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5803                      ((u64) tp->stats_mapping & 0xffffffff));
5804                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5805                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5806         }
5807
5808         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5809
5810         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5811         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5812         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5813                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5814
5815         /* Clear statistics/status block in chip, and status block in ram. */
5816         for (i = NIC_SRAM_STATS_BLK;
5817              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5818              i += sizeof(u32)) {
5819                 tg3_write_mem(tp, i, 0);
5820                 udelay(40);
5821         }
5822         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5823
5824         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5825                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5826                 /* reset to prevent losing 1st rx packet intermittently */
5827                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5828                 udelay(10);
5829         }
5830
5831         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5832                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5833         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5834         udelay(40);
5835
5836         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5837          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5838          * register to preserve the GPIO settings for LOMs. The GPIOs,
5839          * whether used as inputs or outputs, are set by boot code after
5840          * reset.
5841          */
5842         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5843                 u32 gpio_mask;
5844
5845                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5846                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5847
5848                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5849                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5850                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5851
5852                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5853
5854                 /* GPIO1 must be driven high for eeprom write protect */
5855                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5856                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5857         }
5858         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5859         udelay(100);
5860
5861         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5862         tp->last_tag = 0;
5863
5864         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5865                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5866                 udelay(40);
5867         }
5868
5869         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5870                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5871                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5872                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5873                WDMAC_MODE_LNGREAD_ENAB);
5874
5875         /* If statement applies to 5705 and 5750 PCI devices only */
5876         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5877              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5879                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5880                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5881                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5882                         /* nothing */
5883                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5884                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5885                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5886                         val |= WDMAC_MODE_RX_ACCEL;
5887                 }
5888         }
5889
5890         tw32_f(WDMAC_MODE, val);
5891         udelay(40);
5892
5893         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5894                 val = tr32(TG3PCI_X_CAPS);
5895                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5896                         val &= ~PCIX_CAPS_BURST_MASK;
5897                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5898                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5899                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5900                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5901                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5902                                 val |= (tp->split_mode_max_reqs <<
5903                                         PCIX_CAPS_SPLIT_SHIFT);
5904                 }
5905                 tw32(TG3PCI_X_CAPS, val);
5906         }
5907
5908         tw32_f(RDMAC_MODE, rdmac_mode);
5909         udelay(40);
5910
5911         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5912         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5913                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5914         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5915         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5916         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5917         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5918         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5919 #if TG3_TSO_SUPPORT != 0
5920         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5921                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5922 #endif
5923         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5924         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5925
5926         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5927                 err = tg3_load_5701_a0_firmware_fix(tp);
5928                 if (err)
5929                         return err;
5930         }
5931
5932 #if TG3_TSO_SUPPORT != 0
5933         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5934                 err = tg3_load_tso_firmware(tp);
5935                 if (err)
5936                         return err;
5937         }
5938 #endif
5939
5940         tp->tx_mode = TX_MODE_ENABLE;
5941         tw32_f(MAC_TX_MODE, tp->tx_mode);
5942         udelay(100);
5943
5944         tp->rx_mode = RX_MODE_ENABLE;
5945         tw32_f(MAC_RX_MODE, tp->rx_mode);
5946         udelay(10);
5947
5948         if (tp->link_config.phy_is_low_power) {
5949                 tp->link_config.phy_is_low_power = 0;
5950                 tp->link_config.speed = tp->link_config.orig_speed;
5951                 tp->link_config.duplex = tp->link_config.orig_duplex;
5952                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5953         }
5954
5955         tp->mi_mode = MAC_MI_MODE_BASE;
5956         tw32_f(MAC_MI_MODE, tp->mi_mode);
5957         udelay(80);
5958
5959         tw32(MAC_LED_CTRL, tp->led_ctrl);
5960
5961         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5962         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5963                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5964                 udelay(10);
5965         }
5966         tw32_f(MAC_RX_MODE, tp->rx_mode);
5967         udelay(10);
5968
5969         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5970                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5971                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5972                         /* Set drive transmission level to 1.2V  */
5973                         /* only if the signal pre-emphasis bit is not set  */
5974                         val = tr32(MAC_SERDES_CFG);
5975                         val &= 0xfffff000;
5976                         val |= 0x880;
5977                         tw32(MAC_SERDES_CFG, val);
5978                 }
5979                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5980                         tw32(MAC_SERDES_CFG, 0x616000);
5981         }
5982
5983         /* Prevent chip from dropping frames when flow control
5984          * is enabled.
5985          */
5986         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5987
5988         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5989             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5990                 /* Use hardware link auto-negotiation */
5991                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5992         }
5993
5994         err = tg3_setup_phy(tp, 1);
5995         if (err)
5996                 return err;
5997
5998         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5999                 u32 tmp;
6000
6001                 /* Clear CRC stats. */
6002                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6003                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6004                         tg3_readphy(tp, 0x14, &tmp);
6005                 }
6006         }
6007
6008         __tg3_set_rx_mode(tp->dev);
6009
6010         /* Initialize receive rules. */
6011         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6012         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6013         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6014         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6015
6016         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6017             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6018                 limit = 8;
6019         else
6020                 limit = 16;
6021         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6022                 limit -= 4;
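        /* The cases below fall through deliberately: every rule slot from
         * limit - 1 down to 4 is cleared, rules 0 and 1 were programmed
         * above, and slots 2 and 3 are left untouched.
         */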
6023         switch (limit) {
6024         case 16:
6025                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6026         case 15:
6027                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6028         case 14:
6029                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6030         case 13:
6031                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6032         case 12:
6033                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6034         case 11:
6035                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6036         case 10:
6037                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6038         case 9:
6039                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6040         case 8:
6041                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6042         case 7:
6043                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6044         case 6:
6045                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6046         case 5:
6047                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6048         case 4:
6049                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6050         case 3:
6051                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6052         case 2:
6053         case 1:
6054
6055         default:
6056                 break;
6057         }
6058
6059         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6060
6061         return 0;
6062 }
6063
6064 /* Called at device open time to get the chip ready for
6065  * packet processing.  Invoked with tp->lock held.
6066  */
6067 static int tg3_init_hw(struct tg3 *tp)
6068 {
6069         int err;
6070
6071         /* Force the chip into D0. */
6072         err = tg3_set_power_state(tp, 0);
6073         if (err)
6074                 goto out;
6075
6076         tg3_switch_clocks(tp);
6077
6078         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6079
6080         err = tg3_reset_hw(tp);
6081
6082 out:
6083         return err;
6084 }
6085
6086 #define TG3_STAT_ADD32(PSTAT, REG) \
6087 do {    u32 __val = tr32(REG); \
6088         (PSTAT)->low += __val; \
6089         if ((PSTAT)->low < __val) \
6090                 (PSTAT)->high += 1; \
6091 } while (0)
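/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) software
 * counter: if the 32-bit addition wrapped, the new low word is smaller than
 * the value just added, so the high word is incremented.  For example, with
 * low = 0xffffff00 and a reading of 0x200, low becomes 0x100 (< 0x200) and
 * high is bumped by one.
 */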
6092
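/* The 5705 and later chips do not DMA a statistics block to host memory,
 * so tg3_timer calls this once a second to fold the 32-bit MAC counters
 * into the 64-bit software statistics.
 */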
6093 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6094 {
6095         struct tg3_hw_stats *sp = tp->hw_stats;
6096
6097         if (!netif_carrier_ok(tp->dev))
6098                 return;
6099
6100         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6101         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6102         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6103         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6104         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6105         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6106         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6107         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6108         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6109         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6110         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6111         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6112         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6113
6114         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6115         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6116         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6117         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6118         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6119         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6120         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6121         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6122         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6123         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6124         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6125         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6126         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6127         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6128 }
6129
6130 static void tg3_timer(unsigned long __opaque)
6131 {
6132         struct tg3 *tp = (struct tg3 *) __opaque;
6133
6134         spin_lock(&tp->lock);
6135
6136         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6137                 /* All of this garbage is because, when using non-tagged
6138                  * IRQ status, the mailbox/status_block protocol the chip
6139                  * uses with the CPU is race prone.
6140                  */
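                /* If the status block claims an update that we apparently
                 * never saw as an interrupt, force one via GRC_LCLCTRL_SETINT;
                 * otherwise kick the coalescing engine with HOSTCC_MODE_NOW so
                 * a fresh status block is DMA'd to the host.
                 */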
6141                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6142                         tw32(GRC_LOCAL_CTRL,
6143                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6144                 } else {
6145                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6146                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6147                 }
6148
6149                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6150                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6151                         spin_unlock(&tp->lock);
6152                         schedule_work(&tp->reset_task);
6153                         return;
6154                 }
6155         }
6156
6157         /* This part only runs once per second. */
6158         if (!--tp->timer_counter) {
6159                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6160                         tg3_periodic_fetch_stats(tp);
6161
6162                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6163                         u32 mac_stat;
6164                         int phy_event;
6165
6166                         mac_stat = tr32(MAC_STATUS);
6167
6168                         phy_event = 0;
6169                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6170                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6171                                         phy_event = 1;
6172                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6173                                 phy_event = 1;
6174
6175                         if (phy_event)
6176                                 tg3_setup_phy(tp, 0);
6177                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6178                         u32 mac_stat = tr32(MAC_STATUS);
6179                         int need_setup = 0;
6180
6181                         if (netif_carrier_ok(tp->dev) &&
6182                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6183                                 need_setup = 1;
6184                         }
6185                         if (!netif_carrier_ok(tp->dev) &&
6186                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6187                                          MAC_STATUS_SIGNAL_DET))) {
6188                                 need_setup = 1;
6189                         }
6190                         if (need_setup) {
6191                                 tw32_f(MAC_MODE,
6192                                      (tp->mac_mode &
6193                                       ~MAC_MODE_PORT_MODE_MASK));
6194                                 udelay(40);
6195                                 tw32_f(MAC_MODE, tp->mac_mode);
6196                                 udelay(40);
6197                                 tg3_setup_phy(tp, 0);
6198                         }
6199                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6200                         tg3_serdes_parallel_detect(tp);
6201
6202                 tp->timer_counter = tp->timer_multiplier;
6203         }
6204
6205         /* Heartbeat is only sent once every 2 seconds.  */
6206         if (!--tp->asf_counter) {
6207                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6208                         u32 val;
6209
6210                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6211                                            FWCMD_NICDRV_ALIVE2);
6212                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6213                         /* 5 seconds timeout */
6214                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6215                         val = tr32(GRC_RX_CPU_EVENT);
6216                         val |= (1 << 14);
6217                         tw32(GRC_RX_CPU_EVENT, val);
6218                 }
6219                 tp->asf_counter = tp->asf_multiplier;
6220         }
6221
6222         spin_unlock(&tp->lock);
6223
6224         tp->timer.expires = jiffies + tp->timer_offset;
6225         add_timer(&tp->timer);
6226 }
6227
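/* Temporarily install tg3_test_isr, force an immediate interrupt via
 * HOSTCC_MODE_NOW, and check that the interrupt mailbox changes within
 * roughly 50 ms before restoring the normal MSI or INTx handler.
 */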
6228 static int tg3_test_interrupt(struct tg3 *tp)
6229 {
6230         struct net_device *dev = tp->dev;
6231         int err, i;
6232         u32 int_mbox = 0;
6233
6234         if (!netif_running(dev))
6235                 return -ENODEV;
6236
6237         tg3_disable_ints(tp);
6238
6239         free_irq(tp->pdev->irq, dev);
6240
6241         err = request_irq(tp->pdev->irq, tg3_test_isr,
6242                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6243         if (err)
6244                 return err;
6245
6246         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6247         tg3_enable_ints(tp);
6248
6249         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6250                HOSTCC_MODE_NOW);
6251
6252         for (i = 0; i < 5; i++) {
6253                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6254                                         TG3_64BIT_REG_LOW);
6255                 if (int_mbox != 0)
6256                         break;
6257                 msleep(10);
6258         }
6259
6260         tg3_disable_ints(tp);
6261
6262         free_irq(tp->pdev->irq, dev);
6263
6264         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6265                 err = request_irq(tp->pdev->irq, tg3_msi,
6266                                   SA_SAMPLE_RANDOM, dev->name, dev);
6267         else {
6268                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6269                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6270                         fn = tg3_interrupt_tagged;
6271                 err = request_irq(tp->pdev->irq, fn,
6272                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6273         }
6274
6275         if (err)
6276                 return err;
6277
6278         if (int_mbox != 0)
6279                 return 0;
6280
6281         return -EIO;
6282 }
6283
6284 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6285  * successfully restored
6286  */
6287 static int tg3_test_msi(struct tg3 *tp)
6288 {
6289         struct net_device *dev = tp->dev;
6290         int err;
6291         u16 pci_cmd;
6292
6293         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6294                 return 0;
6295
6296         /* Turn off SERR reporting in case MSI terminates with Master
6297          * Abort.
6298          */
6299         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6300         pci_write_config_word(tp->pdev, PCI_COMMAND,
6301                               pci_cmd & ~PCI_COMMAND_SERR);
6302
6303         err = tg3_test_interrupt(tp);
6304
6305         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6306
6307         if (!err)
6308                 return 0;
6309
6310         /* other failures */
6311         if (err != -EIO)
6312                 return err;
6313
6314         /* MSI test failed, go back to INTx mode */
6315         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6316                "switching to INTx mode. Please report this failure to "
6317                "the PCI maintainer and include system chipset information.\n",
6318                        tp->dev->name);
6319
6320         free_irq(tp->pdev->irq, dev);
6321         pci_disable_msi(tp->pdev);
6322
6323         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6324
6325         {
6326                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6327                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6328                         fn = tg3_interrupt_tagged;
6329
6330                 err = request_irq(tp->pdev->irq, fn,
6331                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6332         }
6333         if (err)
6334                 return err;
6335
6336         /* Need to reset the chip because the MSI cycle may have terminated
6337          * with Master Abort.
6338          */
6339         tg3_full_lock(tp, 1);
6340
6341         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6342         err = tg3_init_hw(tp);
6343
6344         tg3_full_unlock(tp);
6345
6346         if (err)
6347                 free_irq(tp->pdev->irq, dev);
6348
6349         return err;
6350 }
6351
6352 static int tg3_open(struct net_device *dev)
6353 {
6354         struct tg3 *tp = netdev_priv(dev);
6355         int err;
6356
6357         tg3_full_lock(tp, 0);
6358
6359         tg3_disable_ints(tp);
6360         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6361
6362         tg3_full_unlock(tp);
6363
6364         /* The placement of this call is tied
6365          * to the setup and use of Host TX descriptors.
6366          */
6367         err = tg3_alloc_consistent(tp);
6368         if (err)
6369                 return err;
6370
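        /* MSI is only attempted on 5750-class and newer chips, and even
         * there the 5750 A and B steppings are skipped; everything else,
         * or a failed pci_enable_msi(), stays on INTx.
         */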
6371         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6372             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6373             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6374                 /* All MSI supporting chips should support tagged
6375                  * status.  Assert that this is the case.
6376                  */
6377                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6378                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6379                                "Not using MSI.\n", tp->dev->name);
6380                 } else if (pci_enable_msi(tp->pdev) == 0) {
6381                         u32 msi_mode;
6382
6383                         msi_mode = tr32(MSGINT_MODE);
6384                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6385                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6386                 }
6387         }
6388         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6389                 err = request_irq(tp->pdev->irq, tg3_msi,
6390                                   SA_SAMPLE_RANDOM, dev->name, dev);
6391         else {
6392                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6393                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6394                         fn = tg3_interrupt_tagged;
6395
6396                 err = request_irq(tp->pdev->irq, fn,
6397                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6398         }
6399
6400         if (err) {
6401                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6402                         pci_disable_msi(tp->pdev);
6403                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6404                 }
6405                 tg3_free_consistent(tp);
6406                 return err;
6407         }
6408
6409         tg3_full_lock(tp, 0);
6410
6411         err = tg3_init_hw(tp);
6412         if (err) {
6413                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6414                 tg3_free_rings(tp);
6415         } else {
6416                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6417                         tp->timer_offset = HZ;
6418                 else
6419                         tp->timer_offset = HZ / 10;
6420
6421                 BUG_ON(tp->timer_offset > HZ);
6422                 tp->timer_counter = tp->timer_multiplier =
6423                         (HZ / tp->timer_offset);
6424                 tp->asf_counter = tp->asf_multiplier =
6425                         ((HZ / tp->timer_offset) * 2);
6426
6427                 init_timer(&tp->timer);
6428                 tp->timer.expires = jiffies + tp->timer_offset;
6429                 tp->timer.data = (unsigned long) tp;
6430                 tp->timer.function = tg3_timer;
6431         }
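        /* With tagged status the timer fires once a second
         * (timer_offset == HZ), so timer_multiplier == 1 and the ASF
         * heartbeat goes out every 2 ticks == 2 seconds; without it the
         * timer fires 10 times a second (timer_offset == HZ / 10),
         * timer_counter wraps every 10 ticks (1 s) and asf_counter every
         * 20 ticks (2 s).
         */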
6432
6433         tg3_full_unlock(tp);
6434
6435         if (err) {
6436                 free_irq(tp->pdev->irq, dev);
6437                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6438                         pci_disable_msi(tp->pdev);
6439                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6440                 }
6441                 tg3_free_consistent(tp);
6442                 return err;
6443         }
6444
6445         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6446                 err = tg3_test_msi(tp);
6447
6448                 if (err) {
6449                         tg3_full_lock(tp, 0);
6450
6451                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6452                                 pci_disable_msi(tp->pdev);
6453                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6454                         }
6455                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6456                         tg3_free_rings(tp);
6457                         tg3_free_consistent(tp);
6458
6459                         tg3_full_unlock(tp);
6460
6461                         return err;
6462                 }
6463         }
6464
6465         tg3_full_lock(tp, 0);
6466
6467         add_timer(&tp->timer);
6468         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6469         tg3_enable_ints(tp);
6470
6471         tg3_full_unlock(tp);
6472
6473         netif_start_queue(dev);
6474
6475         return 0;
6476 }
6477
6478 #if 0
6479 /*static*/ void tg3_dump_state(struct tg3 *tp)
6480 {
6481         u32 val32, val32_2, val32_3, val32_4, val32_5;
6482         u16 val16;
6483         int i;
6484
6485         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6486         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6487         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6488                val16, val32);
6489
6490         /* MAC block */
6491         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6492                tr32(MAC_MODE), tr32(MAC_STATUS));
6493         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6494                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6495         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6496                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6497         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6498                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6499
6500         /* Send data initiator control block */
6501         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6502                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6503         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6504                tr32(SNDDATAI_STATSCTRL));
6505
6506         /* Send data completion control block */
6507         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6508
6509         /* Send BD ring selector block */
6510         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6511                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6512
6513         /* Send BD initiator control block */
6514         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6515                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6516
6517         /* Send BD completion control block */
6518         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6519
6520         /* Receive list placement control block */
6521         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6522                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6523         printk("       RCVLPC_STATSCTRL[%08x]\n",
6524                tr32(RCVLPC_STATSCTRL));
6525
6526         /* Receive data and receive BD initiator control block */
6527         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6528                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6529
6530         /* Receive data completion control block */
6531         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6532                tr32(RCVDCC_MODE));
6533
6534         /* Receive BD initiator control block */
6535         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6536                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6537
6538         /* Receive BD completion control block */
6539         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6540                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6541
6542         /* Receive list selector control block */
6543         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6544                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6545
6546         /* Mbuf cluster free block */
6547         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6548                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6549
6550         /* Host coalescing control block */
6551         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6552                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6553         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6554                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6555                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6556         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6557                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6558                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6559         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6560                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6561         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6562                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6563
6564         /* Memory arbiter control block */
6565         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6566                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6567
6568         /* Buffer manager control block */
6569         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6570                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6571         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6572                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6573         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6574                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6575                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6576                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6577
6578         /* Read DMA control block */
6579         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6580                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6581
6582         /* Write DMA control block */
6583         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6584                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6585
6586         /* DMA completion block */
6587         printk("DEBUG: DMAC_MODE[%08x]\n",
6588                tr32(DMAC_MODE));
6589
6590         /* GRC block */
6591         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6592                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6593         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6594                tr32(GRC_LOCAL_CTRL));
6595
6596         /* TG3_BDINFOs */
6597         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6598                tr32(RCVDBDI_JUMBO_BD + 0x0),
6599                tr32(RCVDBDI_JUMBO_BD + 0x4),
6600                tr32(RCVDBDI_JUMBO_BD + 0x8),
6601                tr32(RCVDBDI_JUMBO_BD + 0xc));
6602         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6603                tr32(RCVDBDI_STD_BD + 0x0),
6604                tr32(RCVDBDI_STD_BD + 0x4),
6605                tr32(RCVDBDI_STD_BD + 0x8),
6606                tr32(RCVDBDI_STD_BD + 0xc));
6607         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6608                tr32(RCVDBDI_MINI_BD + 0x0),
6609                tr32(RCVDBDI_MINI_BD + 0x4),
6610                tr32(RCVDBDI_MINI_BD + 0x8),
6611                tr32(RCVDBDI_MINI_BD + 0xc));
6612
6613         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6614         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6615         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6616         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6617         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6618                val32, val32_2, val32_3, val32_4);
6619
6620         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6621         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6622         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6623         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6624         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6625                val32, val32_2, val32_3, val32_4);
6626
6627         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6628         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6629         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6630         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6631         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6632         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6633                val32, val32_2, val32_3, val32_4, val32_5);
6634
6635         /* SW status block */
6636         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6637                tp->hw_status->status,
6638                tp->hw_status->status_tag,
6639                tp->hw_status->rx_jumbo_consumer,
6640                tp->hw_status->rx_consumer,
6641                tp->hw_status->rx_mini_consumer,
6642                tp->hw_status->idx[0].rx_producer,
6643                tp->hw_status->idx[0].tx_consumer);
6644
6645         /* SW statistics block */
6646         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6647                ((u32 *)tp->hw_stats)[0],
6648                ((u32 *)tp->hw_stats)[1],
6649                ((u32 *)tp->hw_stats)[2],
6650                ((u32 *)tp->hw_stats)[3]);
6651
6652         /* Mailboxes */
6653         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6654                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6655                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6656                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6657                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6658
6659         /* NIC side send descriptors. */
6660         for (i = 0; i < 6; i++) {
6661                 unsigned long txd;
6662
6663                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6664                         + (i * sizeof(struct tg3_tx_buffer_desc));
6665                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6666                        i,
6667                        readl(txd + 0x0), readl(txd + 0x4),
6668                        readl(txd + 0x8), readl(txd + 0xc));
6669         }
6670
6671         /* NIC side RX descriptors. */
6672         for (i = 0; i < 6; i++) {
6673                 unsigned long rxd;
6674
6675                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6676                         + (i * sizeof(struct tg3_rx_buffer_desc));
6677                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6678                        i,
6679                        readl(rxd + 0x0), readl(rxd + 0x4),
6680                        readl(rxd + 0x8), readl(rxd + 0xc));
6681                 rxd += (4 * sizeof(u32));
6682                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6683                        i,
6684                        readl(rxd + 0x0), readl(rxd + 0x4),
6685                        readl(rxd + 0x8), readl(rxd + 0xc));
6686         }
6687
6688         for (i = 0; i < 6; i++) {
6689                 unsigned long rxd;
6690
6691                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6692                         + (i * sizeof(struct tg3_rx_buffer_desc));
6693                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6694                        i,
6695                        readl(rxd + 0x0), readl(rxd + 0x4),
6696                        readl(rxd + 0x8), readl(rxd + 0xc));
6697                 rxd += (4 * sizeof(u32));
6698                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6699                        i,
6700                        readl(rxd + 0x0), readl(rxd + 0x4),
6701                        readl(rxd + 0x8), readl(rxd + 0xc));
6702         }
6703 }
6704 #endif
6705
6706 static struct net_device_stats *tg3_get_stats(struct net_device *);
6707 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6708
6709 static int tg3_close(struct net_device *dev)
6710 {
6711         struct tg3 *tp = netdev_priv(dev);
6712
6713         netif_stop_queue(dev);
6714
6715         del_timer_sync(&tp->timer);
6716
6717         tg3_full_lock(tp, 1);
6718 #if 0
6719         tg3_dump_state(tp);
6720 #endif
6721
6722         tg3_disable_ints(tp);
6723
6724         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6725         tg3_free_rings(tp);
6726         tp->tg3_flags &=
6727                 ~(TG3_FLAG_INIT_COMPLETE |
6728                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6729         netif_carrier_off(tp->dev);
6730
6731         tg3_full_unlock(tp);
6732
6733         free_irq(tp->pdev->irq, dev);
6734         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6735                 pci_disable_msi(tp->pdev);
6736                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6737         }
6738
6739         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6740                sizeof(tp->net_stats_prev));
6741         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6742                sizeof(tp->estats_prev));
6743
6744         tg3_free_consistent(tp);
6745
6746         return 0;
6747 }
6748
6749 static inline unsigned long get_stat64(tg3_stat64_t *val)
6750 {
6751         unsigned long ret;
6752
6753 #if (BITS_PER_LONG == 32)
6754         ret = val->low;
6755 #else
6756         ret = ((u64)val->high << 32) | ((u64)val->low);
6757 #endif
6758         return ret;
6759 }
6760
6761 static unsigned long calc_crc_errors(struct tg3 *tp)
6762 {
6763         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6764
6765         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6766             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6767              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6768                 u32 val;
6769
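                /* The magic numbers below appear to be the PHY test
                 * register (0x1e), whose bit 15 enables the CRC counter,
                 * and the counter register itself (0x14); the counter is
                 * accumulated into phy_crc_errors, which suggests it
                 * clears when read.
                 */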
6770                 spin_lock_bh(&tp->lock);
6771                 if (!tg3_readphy(tp, 0x1e, &val)) {
6772                         tg3_writephy(tp, 0x1e, val | 0x8000);
6773                         tg3_readphy(tp, 0x14, &val);
6774                 } else
6775                         val = 0;
6776                 spin_unlock_bh(&tp->lock);
6777
6778                 tp->phy_crc_errors += val;
6779
6780                 return tp->phy_crc_errors;
6781         }
6782
6783         return get_stat64(&hw_stats->rx_fcs_errors);
6784 }
6785
6786 #define ESTAT_ADD(member) \
6787         estats->member =        old_estats->member + \
6788                                 get_stat64(&hw_stats->member)
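
/* tg3_close() snapshots the running totals into net_stats_prev and
 * estats_prev (the counters in the chip do not survive the halt/reset
 * done there), and the getters below add that snapshot back in, so the
 * values reported to userspace stay monotonic across down/up cycles.
 */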
6789
6790 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6791 {
6792         struct tg3_ethtool_stats *estats = &tp->estats;
6793         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6794         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6795
6796         if (!hw_stats)
6797                 return old_estats;
6798
6799         ESTAT_ADD(rx_octets);
6800         ESTAT_ADD(rx_fragments);
6801         ESTAT_ADD(rx_ucast_packets);
6802         ESTAT_ADD(rx_mcast_packets);
6803         ESTAT_ADD(rx_bcast_packets);
6804         ESTAT_ADD(rx_fcs_errors);
6805         ESTAT_ADD(rx_align_errors);
6806         ESTAT_ADD(rx_xon_pause_rcvd);
6807         ESTAT_ADD(rx_xoff_pause_rcvd);
6808         ESTAT_ADD(rx_mac_ctrl_rcvd);
6809         ESTAT_ADD(rx_xoff_entered);
6810         ESTAT_ADD(rx_frame_too_long_errors);
6811         ESTAT_ADD(rx_jabbers);
6812         ESTAT_ADD(rx_undersize_packets);
6813         ESTAT_ADD(rx_in_length_errors);
6814         ESTAT_ADD(rx_out_length_errors);
6815         ESTAT_ADD(rx_64_or_less_octet_packets);
6816         ESTAT_ADD(rx_65_to_127_octet_packets);
6817         ESTAT_ADD(rx_128_to_255_octet_packets);
6818         ESTAT_ADD(rx_256_to_511_octet_packets);
6819         ESTAT_ADD(rx_512_to_1023_octet_packets);
6820         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6821         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6822         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6823         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6824         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6825
6826         ESTAT_ADD(tx_octets);
6827         ESTAT_ADD(tx_collisions);
6828         ESTAT_ADD(tx_xon_sent);
6829         ESTAT_ADD(tx_xoff_sent);
6830         ESTAT_ADD(tx_flow_control);
6831         ESTAT_ADD(tx_mac_errors);
6832         ESTAT_ADD(tx_single_collisions);
6833         ESTAT_ADD(tx_mult_collisions);
6834         ESTAT_ADD(tx_deferred);
6835         ESTAT_ADD(tx_excessive_collisions);
6836         ESTAT_ADD(tx_late_collisions);
6837         ESTAT_ADD(tx_collide_2times);
6838         ESTAT_ADD(tx_collide_3times);
6839         ESTAT_ADD(tx_collide_4times);
6840         ESTAT_ADD(tx_collide_5times);
6841         ESTAT_ADD(tx_collide_6times);
6842         ESTAT_ADD(tx_collide_7times);
6843         ESTAT_ADD(tx_collide_8times);
6844         ESTAT_ADD(tx_collide_9times);
6845         ESTAT_ADD(tx_collide_10times);
6846         ESTAT_ADD(tx_collide_11times);
6847         ESTAT_ADD(tx_collide_12times);
6848         ESTAT_ADD(tx_collide_13times);
6849         ESTAT_ADD(tx_collide_14times);
6850         ESTAT_ADD(tx_collide_15times);
6851         ESTAT_ADD(tx_ucast_packets);
6852         ESTAT_ADD(tx_mcast_packets);
6853         ESTAT_ADD(tx_bcast_packets);
6854         ESTAT_ADD(tx_carrier_sense_errors);
6855         ESTAT_ADD(tx_discards);
6856         ESTAT_ADD(tx_errors);
6857
6858         ESTAT_ADD(dma_writeq_full);
6859         ESTAT_ADD(dma_write_prioq_full);
6860         ESTAT_ADD(rxbds_empty);
6861         ESTAT_ADD(rx_discards);
6862         ESTAT_ADD(rx_errors);
6863         ESTAT_ADD(rx_threshold_hit);
6864
6865         ESTAT_ADD(dma_readq_full);
6866         ESTAT_ADD(dma_read_prioq_full);
6867         ESTAT_ADD(tx_comp_queue_full);
6868
6869         ESTAT_ADD(ring_set_send_prod_index);
6870         ESTAT_ADD(ring_status_update);
6871         ESTAT_ADD(nic_irqs);
6872         ESTAT_ADD(nic_avoided_irqs);
6873         ESTAT_ADD(nic_tx_threshold_hit);
6874
6875         return estats;
6876 }
6877
6878 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6879 {
6880         struct tg3 *tp = netdev_priv(dev);
6881         struct net_device_stats *stats = &tp->net_stats;
6882         struct net_device_stats *old_stats = &tp->net_stats_prev;
6883         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6884
6885         if (!hw_stats)
6886                 return old_stats;
6887
6888         stats->rx_packets = old_stats->rx_packets +
6889                 get_stat64(&hw_stats->rx_ucast_packets) +
6890                 get_stat64(&hw_stats->rx_mcast_packets) +
6891                 get_stat64(&hw_stats->rx_bcast_packets);
6892                 
6893         stats->tx_packets = old_stats->tx_packets +
6894                 get_stat64(&hw_stats->tx_ucast_packets) +
6895                 get_stat64(&hw_stats->tx_mcast_packets) +
6896                 get_stat64(&hw_stats->tx_bcast_packets);
6897
6898         stats->rx_bytes = old_stats->rx_bytes +
6899                 get_stat64(&hw_stats->rx_octets);
6900         stats->tx_bytes = old_stats->tx_bytes +
6901                 get_stat64(&hw_stats->tx_octets);
6902
6903         stats->rx_errors = old_stats->rx_errors +
6904                 get_stat64(&hw_stats->rx_errors);
6905         stats->tx_errors = old_stats->tx_errors +
6906                 get_stat64(&hw_stats->tx_errors) +
6907                 get_stat64(&hw_stats->tx_mac_errors) +
6908                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6909                 get_stat64(&hw_stats->tx_discards);
6910
6911         stats->multicast = old_stats->multicast +
6912                 get_stat64(&hw_stats->rx_mcast_packets);
6913         stats->collisions = old_stats->collisions +
6914                 get_stat64(&hw_stats->tx_collisions);
6915
6916         stats->rx_length_errors = old_stats->rx_length_errors +
6917                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6918                 get_stat64(&hw_stats->rx_undersize_packets);
6919
6920         stats->rx_over_errors = old_stats->rx_over_errors +
6921                 get_stat64(&hw_stats->rxbds_empty);
6922         stats->rx_frame_errors = old_stats->rx_frame_errors +
6923                 get_stat64(&hw_stats->rx_align_errors);
6924         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6925                 get_stat64(&hw_stats->tx_discards);
6926         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6927                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6928
6929         stats->rx_crc_errors = old_stats->rx_crc_errors +
6930                 calc_crc_errors(tp);
6931
6932         stats->rx_missed_errors = old_stats->rx_missed_errors +
6933                 get_stat64(&hw_stats->rx_discards);
6934
6935         return stats;
6936 }
6937
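/* Bit-reflected CRC-32 (polynomial 0xedb88320, the same CRC used for the
 * Ethernet FCS), computed bit-serially and returned inverted.  Used both
 * for the multicast hash filter below and as the NVRAM checksum in
 * tg3_test_nvram().
 */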
6938 static inline u32 calc_crc(unsigned char *buf, int len)
6939 {
6940         u32 reg;
6941         u32 tmp;
6942         int j, k;
6943
6944         reg = 0xffffffff;
6945
6946         for (j = 0; j < len; j++) {
6947                 reg ^= buf[j];
6948
6949                 for (k = 0; k < 8; k++) {
6950                         tmp = reg & 0x01;
6951
6952                         reg >>= 1;
6953
6954                         if (tmp) {
6955                                 reg ^= 0xedb88320;
6956                         }
6957                 }
6958         }
6959
6960         return ~reg;
6961 }
6962
6963 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6964 {
6965         /* accept or reject all multicast frames */
6966         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6967         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6968         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6969         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6970 }
6971
6972 static void __tg3_set_rx_mode(struct net_device *dev)
6973 {
6974         struct tg3 *tp = netdev_priv(dev);
6975         u32 rx_mode;
6976
6977         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6978                                   RX_MODE_KEEP_VLAN_TAG);
6979
6980         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6981          * flag clear.
6982          */
6983 #if TG3_VLAN_TAG_USED
6984         if (!tp->vlgrp &&
6985             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6986                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6987 #else
6988         /* By definition, VLAN is always disabled in this
6989          * case.
6990          */
6991         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6992                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6993 #endif
6994
6995         if (dev->flags & IFF_PROMISC) {
6996                 /* Promiscuous mode. */
6997                 rx_mode |= RX_MODE_PROMISC;
6998         } else if (dev->flags & IFF_ALLMULTI) {
6999                 /* Accept all multicast. */
7000                 tg3_set_multi(tp, 1);
7001         } else if (dev->mc_count < 1) {
7002                 /* Reject all multicast. */
7003                 tg3_set_multi(tp, 0);
7004         } else {
7005                 /* Accept one or more multicast(s). */
7006                 struct dev_mc_list *mclist;
7007                 unsigned int i;
7008                 u32 mc_filter[4] = { 0, };
7009                 u32 regidx;
7010                 u32 bit;
7011                 u32 crc;
7012
7013                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7014                      i++, mclist = mclist->next) {
7015
7016                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7017                         bit = ~crc & 0x7f;
7018                         regidx = (bit & 0x60) >> 5;
7019                         bit &= 0x1f;
7020                         mc_filter[regidx] |= (1 << bit);
7021                 }
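                /* The low seven bits of the inverted CRC index a 128-bit
                 * hash: bits 6:5 pick one of the four 32-bit MAC_HASH
                 * registers and bits 4:0 pick the bit within it.
                 */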
7022
7023                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7024                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7025                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7026                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7027         }
7028
7029         if (rx_mode != tp->rx_mode) {
7030                 tp->rx_mode = rx_mode;
7031                 tw32_f(MAC_RX_MODE, rx_mode);
7032                 udelay(10);
7033         }
7034 }
7035
7036 static void tg3_set_rx_mode(struct net_device *dev)
7037 {
7038         struct tg3 *tp = netdev_priv(dev);
7039
7040         tg3_full_lock(tp, 0);
7041         __tg3_set_rx_mode(dev);
7042         tg3_full_unlock(tp);
7043 }
7044
7045 #define TG3_REGDUMP_LEN         (32 * 1024)
7046
7047 static int tg3_get_regs_len(struct net_device *dev)
7048 {
7049         return TG3_REGDUMP_LEN;
7050 }
7051
7052 static void tg3_get_regs(struct net_device *dev,
7053                 struct ethtool_regs *regs, void *_p)
7054 {
7055         u32 *p = _p;
7056         struct tg3 *tp = netdev_priv(dev);
7057         u8 *orig_p = _p;
7058         int i;
7059
7060         regs->version = 0;
7061
7062         memset(p, 0, TG3_REGDUMP_LEN);
7063
7064         tg3_full_lock(tp, 0);
7065
7066 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7067 #define GET_REG32_LOOP(base,len)                \
7068 do {    p = (u32 *)(orig_p + (base));           \
7069         for (i = 0; i < len; i += 4)            \
7070                 __GET_REG32((base) + i);        \
7071 } while (0)
7072 #define GET_REG32_1(reg)                        \
7073 do {    p = (u32 *)(orig_p + (reg));            \
7074         __GET_REG32((reg));                     \
7075 } while (0)
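
/* Each GET_REG32_LOOP(base, len) copies len bytes of register space into
 * the snapshot at the same offset as the registers themselves, so the
 * ethtool dump is a sparse 32 kB image of the register file and the gaps
 * between the blocks below stay zero from the memset() above.
 */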
7076
7077         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7078         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7079         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7080         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7081         GET_REG32_1(SNDDATAC_MODE);
7082         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7083         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7084         GET_REG32_1(SNDBDC_MODE);
7085         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7086         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7087         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7088         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7089         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7090         GET_REG32_1(RCVDCC_MODE);
7091         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7092         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7093         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7094         GET_REG32_1(MBFREE_MODE);
7095         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7096         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7097         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7098         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7099         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7100         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7101         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7102         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7103         GET_REG32_LOOP(FTQ_RESET, 0x120);
7104         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7105         GET_REG32_1(DMAC_MODE);
7106         GET_REG32_LOOP(GRC_MODE, 0x4c);
7107         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7108                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7109
7110 #undef __GET_REG32
7111 #undef GET_REG32_LOOP
7112 #undef GET_REG32_1
7113
7114         tg3_full_unlock(tp);
7115 }
7116
7117 static int tg3_get_eeprom_len(struct net_device *dev)
7118 {
7119         struct tg3 *tp = netdev_priv(dev);
7120
7121         return tp->nvram_size;
7122 }
7123
7124 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7125
7126 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7127 {
7128         struct tg3 *tp = netdev_priv(dev);
7129         int ret;
7130         u8  *pd;
7131         u32 i, offset, len, val, b_offset, b_count;
7132
7133         offset = eeprom->offset;
7134         len = eeprom->len;
7135         eeprom->len = 0;
7136
7137         eeprom->magic = TG3_EEPROM_MAGIC;
7138
7139         if (offset & 3) {
7140                 /* adjustments to start on required 4 byte boundary */
7141                 b_offset = offset & 3;
7142                 b_count = 4 - b_offset;
7143                 if (b_count > len) {
7144                         /* i.e. offset=1 len=2 */
7145                         b_count = len;
7146                 }
7147                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7148                 if (ret)
7149                         return ret;
7150                 val = cpu_to_le32(val);
7151                 memcpy(data, ((char*)&val) + b_offset, b_count);
7152                 len -= b_count;
7153                 offset += b_count;
7154                 eeprom->len += b_count;
7155         }
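        /* Example: offset == 1, len == 2 ends up with b_offset == 1 and
         * b_count clamped from 3 to 2, so the word at offset 0 is read and
         * its bytes 1..2 copied out, leaving len == 0 for the loops below.
         */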
7156
7157         /* read bytes up to the last 4 byte boundary */
7158         pd = &data[eeprom->len];
7159         for (i = 0; i < (len - (len & 3)); i += 4) {
7160                 ret = tg3_nvram_read(tp, offset + i, &val);
7161                 if (ret) {
7162                         eeprom->len += i;
7163                         return ret;
7164                 }
7165                 val = cpu_to_le32(val);
7166                 memcpy(pd + i, &val, 4);
7167         }
7168         eeprom->len += i;
7169
7170         if (len & 3) {
7171                 /* read last bytes not ending on 4 byte boundary */
7172                 pd = &data[eeprom->len];
7173                 b_count = len & 3;
7174                 b_offset = offset + len - b_count;
7175                 ret = tg3_nvram_read(tp, b_offset, &val);
7176                 if (ret)
7177                         return ret;
7178                 val = cpu_to_le32(val);
7179                 memcpy(pd, ((char*)&val), b_count);
7180                 eeprom->len += b_count;
7181         }
7182         return 0;
7183 }
7184
7185 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7186
7187 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7188 {
7189         struct tg3 *tp = netdev_priv(dev);
7190         int ret;
7191         u32 offset, len, b_offset, odd_len, start, end;
7192         u8 *buf;
7193
7194         if (eeprom->magic != TG3_EEPROM_MAGIC)
7195                 return -EINVAL;
7196
7197         offset = eeprom->offset;
7198         len = eeprom->len;
7199
7200         if ((b_offset = (offset & 3))) {
7201                 /* adjustments to start on required 4 byte boundary */
7202                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7203                 if (ret)
7204                         return ret;
7205                 start = cpu_to_le32(start);
7206                 len += b_offset;
7207                 offset &= ~3;
7208                 if (len < 4)
7209                         len = 4;
7210         }
7211
7212         odd_len = 0;
7213         if (len & 3) {
7214                 /* adjustments to end on required 4 byte boundary */
7215                 odd_len = 1;
7216                 len = (len + 3) & ~3;
7217                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7218                 if (ret)
7219                         return ret;
7220                 end = cpu_to_le32(end);
7221         }
7222
7223         buf = data;
7224         if (b_offset || odd_len) {
7225                 buf = kmalloc(len, GFP_KERNEL);
7226                 if (!buf)
7227                         return -ENOMEM;
7228                 if (b_offset)
7229                         memcpy(buf, &start, 4);
7230                 if (odd_len)
7231                         memcpy(buf+len-4, &end, 4);
7232                 memcpy(buf + b_offset, data, eeprom->len);
7233         }
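        /* Example: offset == 2, len == 3 reads the word at 0 into start
         * (len becomes 5, offset 0), then the word at 4 into end (len
         * rounded up to 8), and builds an 8-byte buffer whose first and
         * last words preserve the untouched NVRAM bytes while bytes 2..4
         * are overwritten with the caller's data.
         */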
7234
7235         ret = tg3_nvram_write_block(tp, offset, len, buf);
7236
7237         if (buf != data)
7238                 kfree(buf);
7239
7240         return ret;
7241 }
7242
7243 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7244 {
7245         struct tg3 *tp = netdev_priv(dev);
7246   
7247         cmd->supported = (SUPPORTED_Autoneg);
7248
7249         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7250                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7251                                    SUPPORTED_1000baseT_Full);
7252
7253         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7254                 cmd->supported |= (SUPPORTED_100baseT_Half |
7255                                   SUPPORTED_100baseT_Full |
7256                                   SUPPORTED_10baseT_Half |
7257                                   SUPPORTED_10baseT_Full |
7258                                   SUPPORTED_MII);
7259         else
7260                 cmd->supported |= SUPPORTED_FIBRE;
7261   
7262         cmd->advertising = tp->link_config.advertising;
7263         if (netif_running(dev)) {
7264                 cmd->speed = tp->link_config.active_speed;
7265                 cmd->duplex = tp->link_config.active_duplex;
7266         }
7267         cmd->port = 0;
7268         cmd->phy_address = PHY_ADDR;
7269         cmd->transceiver = 0;
7270         cmd->autoneg = tp->link_config.autoneg;
7271         cmd->maxtxpkt = 0;
7272         cmd->maxrxpkt = 0;
7273         return 0;
7274 }
7275   
7276 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7277 {
7278         struct tg3 *tp = netdev_priv(dev);
7279   
7280         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7281                 /* These are the only advertisement bits allowed.  */
7282                 if (cmd->autoneg == AUTONEG_ENABLE &&
7283                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7284                                           ADVERTISED_1000baseT_Full |
7285                                           ADVERTISED_Autoneg |
7286                                           ADVERTISED_FIBRE)))
7287                         return -EINVAL;
7288                 /* Fiber can only do SPEED_1000.  */
7289                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7290                          (cmd->speed != SPEED_1000))
7291                         return -EINVAL;
7292         /* Copper cannot force SPEED_1000.  */
7293         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7294                    (cmd->speed == SPEED_1000))
7295                 return -EINVAL;
7296         else if ((cmd->speed == SPEED_1000) &&
7297                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7298                 return -EINVAL;
7299
7300         tg3_full_lock(tp, 0);
7301
7302         tp->link_config.autoneg = cmd->autoneg;
7303         if (cmd->autoneg == AUTONEG_ENABLE) {
7304                 tp->link_config.advertising = cmd->advertising;
7305                 tp->link_config.speed = SPEED_INVALID;
7306                 tp->link_config.duplex = DUPLEX_INVALID;
7307         } else {
7308                 tp->link_config.advertising = 0;
7309                 tp->link_config.speed = cmd->speed;
7310                 tp->link_config.duplex = cmd->duplex;
7311         }
7312   
7313         if (netif_running(dev))
7314                 tg3_setup_phy(tp, 1);
7315
7316         tg3_full_unlock(tp);
7317   
7318         return 0;
7319 }
7320   
7321 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7322 {
7323         struct tg3 *tp = netdev_priv(dev);
7324   
7325         strcpy(info->driver, DRV_MODULE_NAME);
7326         strcpy(info->version, DRV_MODULE_VERSION);
7327         strcpy(info->bus_info, pci_name(tp->pdev));
7328 }
7329   
7330 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7331 {
7332         struct tg3 *tp = netdev_priv(dev);
7333   
7334         wol->supported = WAKE_MAGIC;
7335         wol->wolopts = 0;
7336         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7337                 wol->wolopts = WAKE_MAGIC;
7338         memset(&wol->sopass, 0, sizeof(wol->sopass));
7339 }
7340   
7341 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7342 {
7343         struct tg3 *tp = netdev_priv(dev);
7344   
7345         if (wol->wolopts & ~WAKE_MAGIC)
7346                 return -EINVAL;
7347         if ((wol->wolopts & WAKE_MAGIC) &&
7348             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7349             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7350                 return -EINVAL;
7351   
7352         spin_lock_bh(&tp->lock);
7353         if (wol->wolopts & WAKE_MAGIC)
7354                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7355         else
7356                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7357         spin_unlock_bh(&tp->lock);
7358   
7359         return 0;
7360 }
7361   
7362 static u32 tg3_get_msglevel(struct net_device *dev)
7363 {
7364         struct tg3 *tp = netdev_priv(dev);
7365         return tp->msg_enable;
7366 }
7367   
7368 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7369 {
7370         struct tg3 *tp = netdev_priv(dev);
7371         tp->msg_enable = value;
7372 }
7373   
7374 #if TG3_TSO_SUPPORT != 0
7375 static int tg3_set_tso(struct net_device *dev, u32 value)
7376 {
7377         struct tg3 *tp = netdev_priv(dev);
7378
7379         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7380                 if (value)
7381                         return -EINVAL;
7382                 return 0;
7383         }
7384         return ethtool_op_set_tso(dev, value);
7385 }
7386 #endif
7387   
7388 static int tg3_nway_reset(struct net_device *dev)
7389 {
7390         struct tg3 *tp = netdev_priv(dev);
7391         u32 bmcr;
7392         int r;
7393   
7394         if (!netif_running(dev))
7395                 return -EAGAIN;
7396
7397         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7398                 return -EINVAL;
7399
7400         spin_lock_bh(&tp->lock);
7401         r = -EINVAL;
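        /* BMCR is read twice on purpose, apparently to flush a stale or
         * latched value; only the result of the second read is used.
         */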
7402         tg3_readphy(tp, MII_BMCR, &bmcr);
7403         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7404             ((bmcr & BMCR_ANENABLE) ||
7405              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7406                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7407                                            BMCR_ANENABLE);
7408                 r = 0;
7409         }
7410         spin_unlock_bh(&tp->lock);
7411   
7412         return r;
7413 }
7414   
7415 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7416 {
7417         struct tg3 *tp = netdev_priv(dev);
7418   
7419         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7420         ering->rx_mini_max_pending = 0;
7421         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7422
7423         ering->rx_pending = tp->rx_pending;
7424         ering->rx_mini_pending = 0;
7425         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7426         ering->tx_pending = tp->tx_pending;
7427 }
7428   
7429 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7430 {
7431         struct tg3 *tp = netdev_priv(dev);
7432         int irq_sync = 0;
7433   
7434         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7435             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7436             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7437                 return -EINVAL;
7438   
7439         if (netif_running(dev)) {
7440                 tg3_netif_stop(tp);
7441                 irq_sync = 1;
7442         }
7443
7444         tg3_full_lock(tp, irq_sync);
7445   
7446         tp->rx_pending = ering->rx_pending;
7447
7448         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7449             tp->rx_pending > 63)
7450                 tp->rx_pending = 63;
7451         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7452         tp->tx_pending = ering->tx_pending;
7453
7454         if (netif_running(dev)) {
7455                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7456                 tg3_init_hw(tp);
7457                 tg3_netif_start(tp);
7458         }
7459
7460         tg3_full_unlock(tp);
7461   
7462         return 0;
7463 }
7464   
7465 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7466 {
7467         struct tg3 *tp = netdev_priv(dev);
7468   
7469         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7470         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7471         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7472 }
7473   
7474 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7475 {
7476         struct tg3 *tp = netdev_priv(dev);
7477         int irq_sync = 0;
7478   
7479         if (netif_running(dev)) {
7480                 tg3_netif_stop(tp);
7481                 irq_sync = 1;
7482         }
7483
7484         tg3_full_lock(tp, irq_sync);
7485
7486         if (epause->autoneg)
7487                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7488         else
7489                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7490         if (epause->rx_pause)
7491                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7492         else
7493                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7494         if (epause->tx_pause)
7495                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7496         else
7497                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7498
7499         if (netif_running(dev)) {
7500                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7501                 tg3_init_hw(tp);
7502                 tg3_netif_start(tp);
7503         }
7504
7505         tg3_full_unlock(tp);
7506   
7507         return 0;
7508 }
7509   
7510 static u32 tg3_get_rx_csum(struct net_device *dev)
7511 {
7512         struct tg3 *tp = netdev_priv(dev);
7513         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7514 }
7515   
7516 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7517 {
7518         struct tg3 *tp = netdev_priv(dev);
7519   
7520         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7521                 if (data != 0)
7522                         return -EINVAL;
7523                 return 0;
7524         }
7525   
7526         spin_lock_bh(&tp->lock);
7527         if (data)
7528                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7529         else
7530                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7531         spin_unlock_bh(&tp->lock);
7532   
7533         return 0;
7534 }
7535   
7536 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7537 {
7538         struct tg3 *tp = netdev_priv(dev);
7539   
7540         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7541                 if (data != 0)
7542                         return -EINVAL;
7543                 return 0;
7544         }
7545   
7546         if (data)
7547                 dev->features |= NETIF_F_IP_CSUM;
7548         else
7549                 dev->features &= ~NETIF_F_IP_CSUM;
7550
7551         return 0;
7552 }
7553
7554 static int tg3_get_stats_count(struct net_device *dev)
7555 {
7556         return TG3_NUM_STATS;
7557 }
7558
7559 static int tg3_get_test_count(struct net_device *dev)
7560 {
7561         return TG3_NUM_TEST;
7562 }
7563
7564 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7565 {
7566         switch (stringset) {
7567         case ETH_SS_STATS:
7568                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7569                 break;
7570         case ETH_SS_TEST:
7571                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7572                 break;
7573         default:
7574                 WARN_ON(1);     /* we need a WARN() */
7575                 break;
7576         }
7577 }
7578
7579 static int tg3_phys_id(struct net_device *dev, u32 data)
7580 {
7581         struct tg3 *tp = netdev_priv(dev);
7582         int i;
7583
7584         if (!netif_running(tp->dev))
7585                 return -EAGAIN;
7586
7587         if (data == 0)
7588                 data = 2;
7589
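        /* Each pass toggles the LEDs and sleeps 500 ms, and there are
         * data * 2 passes, so 'data' is the blink time in seconds
         * (2 seconds when ethtool passes 0).
         */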
7590         for (i = 0; i < (data * 2); i++) {
7591                 if ((i % 2) == 0)
7592                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7593                                            LED_CTRL_1000MBPS_ON |
7594                                            LED_CTRL_100MBPS_ON |
7595                                            LED_CTRL_10MBPS_ON |
7596                                            LED_CTRL_TRAFFIC_OVERRIDE |
7597                                            LED_CTRL_TRAFFIC_BLINK |
7598                                            LED_CTRL_TRAFFIC_LED);
7599         
7600                 else
7601                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7602                                            LED_CTRL_TRAFFIC_OVERRIDE);
7603
7604                 if (msleep_interruptible(500))
7605                         break;
7606         }
7607         tw32(MAC_LED_CTRL, tp->led_ctrl);
7608         return 0;
7609 }
7610
7611 static void tg3_get_ethtool_stats(struct net_device *dev,
7612                                    struct ethtool_stats *estats, u64 *tmp_stats)
7613 {
7614         struct tg3 *tp = netdev_priv(dev);
7615         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7616 }
7617
7618 #define NVRAM_TEST_SIZE 0x100
7619
7620 static int tg3_test_nvram(struct tg3 *tp)
7621 {
7622         u32 *buf, csum;
7623         int i, j, err = 0;
7624
7625         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7626         if (buf == NULL)
7627                 return -ENOMEM;
7628
7629         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7630                 u32 val;
7631
7632                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7633                         break;
7634                 buf[j] = cpu_to_le32(val);
7635         }
7636         if (i < NVRAM_TEST_SIZE)
7637                 goto out;
7638
7639         err = -EIO;
7640         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7641                 goto out;
7642
7643         /* Bootstrap checksum at offset 0x10 */
7644         csum = calc_crc((unsigned char *) buf, 0x10);
7645         if (csum != cpu_to_le32(buf[0x10/4]))
7646                 goto out;
7647
7648         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7649         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7650         if (csum != cpu_to_le32(buf[0xfc/4]))
7651                 goto out;
7652
7653         err = 0;
7654
7655 out:
7656         kfree(buf);
7657         return err;
7658 }
7659
7660 #define TG3_SERDES_TIMEOUT_SEC  2
7661 #define TG3_COPPER_TIMEOUT_SEC  6
7662
7663 static int tg3_test_link(struct tg3 *tp)
7664 {
7665         int i, max;
7666
7667         if (!netif_running(tp->dev))
7668                 return -ENODEV;
7669
7670         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7671                 max = TG3_SERDES_TIMEOUT_SEC;
7672         else
7673                 max = TG3_COPPER_TIMEOUT_SEC;
7674
7675         for (i = 0; i < max; i++) {
7676                 if (netif_carrier_ok(tp->dev))
7677                         return 0;
7678
7679                 if (msleep_interruptible(1000))
7680                         break;
7681         }
7682
7683         return -EIO;
7684 }
7685
7686 /* Only test the commonly used registers */
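/* Each reg_tbl[] entry below pairs a register offset and chip-selection
 * flags with two masks: read_mask covers read-only bits that must keep
 * their value across the test writes, write_mask covers read/write bits
 * that must read back whatever the test writes into them.  (The loop that
 * consumes the table follows further down in the file.)
 */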
7687 static int tg3_test_registers(struct tg3 *tp)
7688 {
7689         int i, is_5705;
7690         u32 offset, read_mask, write_mask, val, save_val, read_val;
7691         static struct {
7692                 u16 offset;
7693                 u16 flags;
7694 #define TG3_FL_5705     0x1
7695 #define TG3_FL_NOT_5705 0x2
7696 #define TG3_FL_NOT_5788 0x4
7697                 u32 read_mask;
7698                 u32 write_mask;
7699         } reg_tbl[] = {
7700                 /* MAC Control Registers */
7701                 { MAC_MODE, TG3_FL_NOT_5705,
7702                         0x00000000, 0x00ef6f8c },
7703                 { MAC_MODE, TG3_FL_5705,
7704                         0x00000000, 0x01ef6b8c },
7705                 { MAC_STATUS, TG3_FL_NOT_5705,
7706                         0x03800107, 0x00000000 },
7707                 { MAC_STATUS, TG3_FL_5705,
7708                         0x03800100, 0x00000000 },
7709                 { MAC_ADDR_0_HIGH, 0x0000,
7710                         0x00000000, 0x0000ffff },
7711                 { MAC_ADDR_0_LOW, 0x0000,
7712                         0x00000000, 0xffffffff },
7713                 { MAC_RX_MTU_SIZE, 0x0000,
7714                         0x00000000, 0x0000ffff },
7715                 { MAC_TX_MODE, 0x0000,
7716                         0x00000000, 0x00000070 },
7717                 { MAC_TX_LENGTHS, 0x0000,
7718                         0x00000000, 0x00003fff },
7719                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7720                         0x00000000, 0x000007fc },
7721                 { MAC_RX_MODE, TG3_FL_5705,
7722                         0x00000000, 0x000007dc },
7723                 { MAC_HASH_REG_0, 0x0000,
7724                         0x00000000, 0xffffffff },
7725                 { MAC_HASH_REG_1, 0x0000,
7726                         0x00000000, 0xffffffff },
7727                 { MAC_HASH_REG_2, 0x0000,
7728                         0x00000000, 0xffffffff },
7729                 { MAC_HASH_REG_3, 0x0000,
7730                         0x00000000, 0xffffffff },
7731
7732                 /* Receive Data and Receive BD Initiator Control Registers. */
7733                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7734                         0x00000000, 0xffffffff },
7735                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7736                         0x00000000, 0xffffffff },
7737                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7738                         0x00000000, 0x00000003 },
7739                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7740                         0x00000000, 0xffffffff },
7741                 { RCVDBDI_STD_BD+0, 0x0000,
7742                         0x00000000, 0xffffffff },
7743                 { RCVDBDI_STD_BD+4, 0x0000,
7744                         0x00000000, 0xffffffff },
7745                 { RCVDBDI_STD_BD+8, 0x0000,
7746                         0x00000000, 0xffff0002 },
7747                 { RCVDBDI_STD_BD+0xc, 0x0000,
7748                         0x00000000, 0xffffffff },
7749         
7750                 /* Receive BD Initiator Control Registers. */
7751                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7752                         0x00000000, 0xffffffff },
7753                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7754                         0x00000000, 0x000003ff },
7755                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7756                         0x00000000, 0xffffffff },
7757
7758                 /* Host Coalescing Control Registers. */
7759                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7760                         0x00000000, 0x00000004 },
7761                 { HOSTCC_MODE, TG3_FL_5705,
7762                         0x00000000, 0x000000f6 },
7763                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7764                         0x00000000, 0xffffffff },
7765                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7766                         0x00000000, 0x000003ff },
7767                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7768                         0x00000000, 0xffffffff },
7769                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7770                         0x00000000, 0x000003ff },
7771                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7772                         0x00000000, 0xffffffff },
7773                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7774                         0x00000000, 0x000000ff },
7775                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7776                         0x00000000, 0xffffffff },
7777                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7778                         0x00000000, 0x000000ff },
7779                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7780                         0x00000000, 0xffffffff },
7781                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7782                         0x00000000, 0xffffffff },
7783                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7784                         0x00000000, 0xffffffff },
7785                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7786                         0x00000000, 0x000000ff },
7787                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7788                         0x00000000, 0xffffffff },
7789                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7790                         0x00000000, 0x000000ff },
7791                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7792                         0x00000000, 0xffffffff },
7793                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7794                         0x00000000, 0xffffffff },
7795                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7796                         0x00000000, 0xffffffff },
7797                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7798                         0x00000000, 0xffffffff },
7799                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7800                         0x00000000, 0xffffffff },
7801                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7802                         0xffffffff, 0x00000000 },
7803                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7804                         0xffffffff, 0x00000000 },
7805
7806                 /* Buffer Manager Control Registers. */
7807                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7808                         0x00000000, 0x007fff80 },
7809                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7810                         0x00000000, 0x007fffff },
7811                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7812                         0x00000000, 0x0000003f },
7813                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7814                         0x00000000, 0x000001ff },
7815                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7816                         0x00000000, 0x000001ff },
7817                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7818                         0xffffffff, 0x00000000 },
7819                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7820                         0xffffffff, 0x00000000 },
7821
7822                 /* Mailbox Registers */
7823                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7824                         0x00000000, 0x000001ff },
7825                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7826                         0x00000000, 0x000001ff },
7827                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7828                         0x00000000, 0x000007ff },
7829                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7830                         0x00000000, 0x000001ff },
7831
7832                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7833         };
7834
7835         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7836                 is_5705 = 1;
7837         else
7838                 is_5705 = 0;
7839
7840         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7841                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7842                         continue;
7843
7844                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7845                         continue;
7846
7847                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7848                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7849                         continue;
7850
7851                 offset = (u32) reg_tbl[i].offset;
7852                 read_mask = reg_tbl[i].read_mask;
7853                 write_mask = reg_tbl[i].write_mask;
7854
7855                 /* Save the original register content */
7856                 save_val = tr32(offset);
7857
7858                 /* Determine the read-only value. */
7859                 read_val = save_val & read_mask;
7860
7861                 /* Write zero to the register, then make sure the read-only bits
7862                  * are not changed and the read/write bits are all zeros.
7863                  */
7864                 tw32(offset, 0);
7865
7866                 val = tr32(offset);
7867
7868                 /* Test the read-only and read/write bits. */
7869                 if (((val & read_mask) != read_val) || (val & write_mask))
7870                         goto out;
7871
7872                 /* Write ones to all the bits defined by RdMask and WrMask, then
7873                  * make sure the read-only bits are not changed and the
7874                  * read/write bits are all ones.
7875                  */
7876                 tw32(offset, read_mask | write_mask);
7877
7878                 val = tr32(offset);
7879
7880                 /* Test the read-only bits. */
7881                 if ((val & read_mask) != read_val)
7882                         goto out;
7883
7884                 /* Test the read/write bits. */
7885                 if ((val & write_mask) != write_mask)
7886                         goto out;
7887
7888                 tw32(offset, save_val);
7889         }
7890
7891         return 0;
7892
7893 out:
7894         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7895         tw32(offset, save_val);
7896         return -EIO;
7897 }
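/* A note on the mask convention used by tg3_test_registers() above:
 * read_mask selects read-only bits that must keep their saved value,
 * while write_mask selects read/write bits that must follow whatever
 * is written.  For example, the MAC_ADDR_0_HIGH entry (read_mask
 * 0x00000000, write_mask 0x0000ffff) expects the low 16 bits to read
 * back as 0 after the zero write and as all ones after the ones write,
 * with the remaining bits ignored.
 */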
7898
7899 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7900 {
7901         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7902         int i;
7903         u32 j;
7904
7905         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7906                 for (j = 0; j < len; j += 4) {
7907                         u32 val;
7908
7909                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7910                         tg3_read_mem(tp, offset + j, &val);
7911                         if (val != test_pattern[i])
7912                                 return -EIO;
7913                 }
7914         }
7915         return 0;
7916 }
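/* tg3_do_mem_test() walks the given window of NIC-internal memory one
 * 32-bit word at a time and write/read-verifies each of the three
 * patterns above (all zeros, all ones, 0xaa55a55a), so a stuck data
 * bit anywhere in the window shows up as -EIO.
 */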
7917
7918 static int tg3_test_memory(struct tg3 *tp)
7919 {
7920         static struct mem_entry {
7921                 u32 offset;
7922                 u32 len;
7923         } mem_tbl_570x[] = {
7924                 { 0x00000000, 0x01000},
7925                 { 0x00002000, 0x1c000},
7926                 { 0xffffffff, 0x00000}
7927         }, mem_tbl_5705[] = {
7928                 { 0x00000100, 0x0000c},
7929                 { 0x00000200, 0x00008},
7930                 { 0x00000b50, 0x00400},
7931                 { 0x00004000, 0x00800},
7932                 { 0x00006000, 0x01000},
7933                 { 0x00008000, 0x02000},
7934                 { 0x00010000, 0x0e000},
7935                 { 0xffffffff, 0x00000}
7936         };
7937         struct mem_entry *mem_tbl;
7938         int err = 0;
7939         int i;
7940
7941         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7942                 mem_tbl = mem_tbl_5705;
7943         else
7944                 mem_tbl = mem_tbl_570x;
7945
7946         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7947                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7948                     mem_tbl[i].len)) != 0)
7949                         break;
7950         }
7951
7952         return err;
7953 }
7954
7955 #define TG3_MAC_LOOPBACK        0
7956 #define TG3_PHY_LOOPBACK        1
7957
7958 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7959 {
7960         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
7961         u32 desc_idx;
7962         struct sk_buff *skb, *rx_skb;
7963         u8 *tx_data;
7964         dma_addr_t map;
7965         int num_pkts, tx_len, rx_len, i, err;
7966         struct tg3_rx_buffer_desc *desc;
7967
7968         if (loopback_mode == TG3_MAC_LOOPBACK) {
7969                 /* HW errata - mac loopback fails in some cases on 5780.
7970                  * Normal traffic and PHY loopback are not affected by
7971                  * errata.
7972                  */
7973                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7974                         return 0;
7975
7976                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7977                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7978                            MAC_MODE_PORT_MODE_GMII;
7979                 tw32(MAC_MODE, mac_mode);
7980         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7981                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7982                                            BMCR_SPEED1000);
7983                 udelay(40);
7984                 /* reset to prevent losing 1st rx packet intermittently */
7985                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7986                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7987                         udelay(10);
7988                         tw32_f(MAC_RX_MODE, tp->rx_mode);
7989                 }
7990                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7991                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7992                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7993                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7994                 tw32(MAC_MODE, mac_mode);
7995         }
7996         else
7997                 return -EINVAL;
7998
7999         err = -EIO;
8000
8001         tx_len = 1514;
8002         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8003         tx_data = skb_put(skb, tx_len);
8004         memcpy(tx_data, tp->dev->dev_addr, 6);
8005         memset(tx_data + 6, 0x0, 8);
8006
8007         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8008
8009         for (i = 14; i < tx_len; i++)
8010                 tx_data[i] = (u8) (i & 0xff);
8011
8012         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8013
8014         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8015              HOSTCC_MODE_NOW);
8016
8017         udelay(10);
8018
8019         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8020
8021         num_pkts = 0;
8022
8023         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8024
8025         tp->tx_prod++;
8026         num_pkts++;
8027
8028         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8029                      tp->tx_prod);
8030         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8031
8032         udelay(10);
8033
8034         for (i = 0; i < 10; i++) {
8035                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8036                        HOSTCC_MODE_NOW);
8037
8038                 udelay(10);
8039
8040                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8041                 rx_idx = tp->hw_status->idx[0].rx_producer;
8042                 if ((tx_idx == tp->tx_prod) &&
8043                     (rx_idx == (rx_start_idx + num_pkts)))
8044                         break;
8045         }
8046
8047         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8048         dev_kfree_skb(skb);
8049
8050         if (tx_idx != tp->tx_prod)
8051                 goto out;
8052
8053         if (rx_idx != rx_start_idx + num_pkts)
8054                 goto out;
8055
8056         desc = &tp->rx_rcb[rx_start_idx];
8057         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8058         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8059         if (opaque_key != RXD_OPAQUE_RING_STD)
8060                 goto out;
8061
8062         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8063             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8064                 goto out;
8065
8066         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8067         if (rx_len != tx_len)
8068                 goto out;
8069
8070         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8071
8072         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8073         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8074
8075         for (i = 14; i < tx_len; i++) {
8076                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8077                         goto out;
8078         }
8079         err = 0;
8080
8081         /* tg3_free_rings will unmap and free the rx_skb */
8082 out:
8083         return err;
8084 }
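/* Rough outline of the loopback test above: the MAC (or the PHY) is
 * put in loopback, a single 1514-byte frame whose payload is an
 * incrementing byte pattern is placed on the send ring, the send-host
 * producer mailbox is kicked, and the status block is polled until the
 * tx consumer and rx producer indices show the frame has gone out and
 * come back.  The received buffer is then compared byte-for-byte
 * against the transmitted pattern.
 */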
8085
8086 #define TG3_MAC_LOOPBACK_FAILED         1
8087 #define TG3_PHY_LOOPBACK_FAILED         2
8088 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8089                                          TG3_PHY_LOOPBACK_FAILED)
8090
8091 static int tg3_test_loopback(struct tg3 *tp)
8092 {
8093         int err = 0;
8094
8095         if (!netif_running(tp->dev))
8096                 return TG3_LOOPBACK_FAILED;
8097
8098         tg3_reset_hw(tp);
8099
8100         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8101                 err |= TG3_MAC_LOOPBACK_FAILED;
8102         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8103                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8104                         err |= TG3_PHY_LOOPBACK_FAILED;
8105         }
8106
8107         return err;
8108 }
8109
8110 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8111                           u64 *data)
8112 {
8113         struct tg3 *tp = netdev_priv(dev);
8114
8115         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8116
8117         if (tg3_test_nvram(tp) != 0) {
8118                 etest->flags |= ETH_TEST_FL_FAILED;
8119                 data[0] = 1;
8120         }
8121         if (tg3_test_link(tp) != 0) {
8122                 etest->flags |= ETH_TEST_FL_FAILED;
8123                 data[1] = 1;
8124         }
8125         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8126                 int irq_sync = 0;
8127
8128                 if (netif_running(dev)) {
8129                         tg3_netif_stop(tp);
8130                         irq_sync = 1;
8131                 }
8132
8133                 tg3_full_lock(tp, irq_sync);
8134
8135                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8136                 tg3_nvram_lock(tp);
8137                 tg3_halt_cpu(tp, RX_CPU_BASE);
8138                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8139                         tg3_halt_cpu(tp, TX_CPU_BASE);
8140                 tg3_nvram_unlock(tp);
8141
8142                 if (tg3_test_registers(tp) != 0) {
8143                         etest->flags |= ETH_TEST_FL_FAILED;
8144                         data[2] = 1;
8145                 }
8146                 if (tg3_test_memory(tp) != 0) {
8147                         etest->flags |= ETH_TEST_FL_FAILED;
8148                         data[3] = 1;
8149                 }
8150                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8151                         etest->flags |= ETH_TEST_FL_FAILED;
8152
8153                 tg3_full_unlock(tp);
8154
8155                 if (tg3_test_interrupt(tp) != 0) {
8156                         etest->flags |= ETH_TEST_FL_FAILED;
8157                         data[5] = 1;
8158                 }
8159
8160                 tg3_full_lock(tp, 0);
8161
8162                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8163                 if (netif_running(dev)) {
8164                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8165                         tg3_init_hw(tp);
8166                         tg3_netif_start(tp);
8167                 }
8168
8169                 tg3_full_unlock(tp);
8170         }
8171 }
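/* The self test above is normally driven from userspace via ethtool;
 * something along the lines of
 *
 *      # ethtool -t eth0 offline
 *
 * (device name is only an example) runs the full offline set.  The
 * resulting data[] slots are: 0 nvram, 1 link, 2 registers, 3 memory,
 * 4 loopback, 5 interrupt.
 */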
8172
8173 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8174 {
8175         struct mii_ioctl_data *data = if_mii(ifr);
8176         struct tg3 *tp = netdev_priv(dev);
8177         int err;
8178
8179         switch(cmd) {
8180         case SIOCGMIIPHY:
8181                 data->phy_id = PHY_ADDR;
8182
8183                 /* fallthru */
8184         case SIOCGMIIREG: {
8185                 u32 mii_regval;
8186
8187                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8188                         break;                  /* We have no PHY */
8189
8190                 spin_lock_bh(&tp->lock);
8191                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8192                 spin_unlock_bh(&tp->lock);
8193
8194                 data->val_out = mii_regval;
8195
8196                 return err;
8197         }
8198
8199         case SIOCSMIIREG:
8200                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8201                         break;                  /* We have no PHY */
8202
8203                 if (!capable(CAP_NET_ADMIN))
8204                         return -EPERM;
8205
8206                 spin_lock_bh(&tp->lock);
8207                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8208                 spin_unlock_bh(&tp->lock);
8209
8210                 return err;
8211
8212         default:
8213                 /* do nothing */
8214                 break;
8215         }
8216         return -EOPNOTSUPP;
8217 }
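/* The MII ioctls above are what tools such as mii-tool use.  A rough
 * userspace sketch (hypothetical device name, error handling omitted;
 * needs <sys/ioctl.h>, <sys/socket.h>, <string.h>, <net/if.h>,
 * <linux/sockios.h> and <linux/mii.h>) for reading PHY register 1
 * (BMSR):
 *
 *      struct ifreq ifr = { 0 };
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);     // fills mii->phy_id
 *      mii->reg_num = MII_BMSR;
 *      ioctl(fd, SIOCGMIIREG, &ifr);     // result lands in mii->val_out
 */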
8218
8219 #if TG3_VLAN_TAG_USED
8220 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8221 {
8222         struct tg3 *tp = netdev_priv(dev);
8223
8224         tg3_full_lock(tp, 0);
8225
8226         tp->vlgrp = grp;
8227
8228         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8229         __tg3_set_rx_mode(dev);
8230
8231         tg3_full_unlock(tp);
8232 }
8233
8234 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8235 {
8236         struct tg3 *tp = netdev_priv(dev);
8237
8238         tg3_full_lock(tp, 0);
8239         if (tp->vlgrp)
8240                 tp->vlgrp->vlan_devices[vid] = NULL;
8241         tg3_full_unlock(tp);
8242 }
8243 #endif
8244
8245 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8246 {
8247         struct tg3 *tp = netdev_priv(dev);
8248
8249         memcpy(ec, &tp->coal, sizeof(*ec));
8250         return 0;
8251 }
8252
8253 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8254 {
8255         struct tg3 *tp = netdev_priv(dev);
8256         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8257         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8258
8259         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8260                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8261                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8262                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8263                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8264         }
8265
8266         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8267             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8268             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8269             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8270             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8271             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8272             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8273             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8274             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8275             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8276                 return -EINVAL;
8277
8278         /* No rx interrupts will be generated if both are zero */
8279         if ((ec->rx_coalesce_usecs == 0) &&
8280             (ec->rx_max_coalesced_frames == 0))
8281                 return -EINVAL;
8282
8283         /* No tx interrupts will be generated if both are zero */
8284         if ((ec->tx_coalesce_usecs == 0) &&
8285             (ec->tx_max_coalesced_frames == 0))
8286                 return -EINVAL;
8287
8288         /* Only copy relevant parameters, ignore all others. */
8289         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8290         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8291         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8292         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8293         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8294         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8295         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8296         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8297         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8298
8299         if (netif_running(dev)) {
8300                 tg3_full_lock(tp, 0);
8301                 __tg3_set_coalesce(tp, &tp->coal);
8302                 tg3_full_unlock(tp);
8303         }
8304         return 0;
8305 }
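/* These fields correspond to the ethtool -C / --coalesce options; for
 * example a command along the lines of
 *
 *      # ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * (device name and values are illustrative) arrives here as
 * ec->rx_coalesce_usecs and ec->rx_max_coalesced_frames.
 */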
8306
8307 static struct ethtool_ops tg3_ethtool_ops = {
8308         .get_settings           = tg3_get_settings,
8309         .set_settings           = tg3_set_settings,
8310         .get_drvinfo            = tg3_get_drvinfo,
8311         .get_regs_len           = tg3_get_regs_len,
8312         .get_regs               = tg3_get_regs,
8313         .get_wol                = tg3_get_wol,
8314         .set_wol                = tg3_set_wol,
8315         .get_msglevel           = tg3_get_msglevel,
8316         .set_msglevel           = tg3_set_msglevel,
8317         .nway_reset             = tg3_nway_reset,
8318         .get_link               = ethtool_op_get_link,
8319         .get_eeprom_len         = tg3_get_eeprom_len,
8320         .get_eeprom             = tg3_get_eeprom,
8321         .set_eeprom             = tg3_set_eeprom,
8322         .get_ringparam          = tg3_get_ringparam,
8323         .set_ringparam          = tg3_set_ringparam,
8324         .get_pauseparam         = tg3_get_pauseparam,
8325         .set_pauseparam         = tg3_set_pauseparam,
8326         .get_rx_csum            = tg3_get_rx_csum,
8327         .set_rx_csum            = tg3_set_rx_csum,
8328         .get_tx_csum            = ethtool_op_get_tx_csum,
8329         .set_tx_csum            = tg3_set_tx_csum,
8330         .get_sg                 = ethtool_op_get_sg,
8331         .set_sg                 = ethtool_op_set_sg,
8332 #if TG3_TSO_SUPPORT != 0
8333         .get_tso                = ethtool_op_get_tso,
8334         .set_tso                = tg3_set_tso,
8335 #endif
8336         .self_test_count        = tg3_get_test_count,
8337         .self_test              = tg3_self_test,
8338         .get_strings            = tg3_get_strings,
8339         .phys_id                = tg3_phys_id,
8340         .get_stats_count        = tg3_get_stats_count,
8341         .get_ethtool_stats      = tg3_get_ethtool_stats,
8342         .get_coalesce           = tg3_get_coalesce,
8343         .set_coalesce           = tg3_set_coalesce,
8344         .get_perm_addr          = ethtool_op_get_perm_addr,
8345 };
8346
8347 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8348 {
8349         u32 cursize, val;
8350
8351         tp->nvram_size = EEPROM_CHIP_SIZE;
8352
8353         if (tg3_nvram_read(tp, 0, &val) != 0)
8354                 return;
8355
8356         if (swab32(val) != TG3_EEPROM_MAGIC)
8357                 return;
8358
8359         /*
8360          * Size the chip by reading offsets at increasing powers of two.
8361          * When we encounter our validation signature, we know the addressing
8362          * has wrapped around, and thus have our chip size.
8363          */
8364         cursize = 0x800;
8365
8366         while (cursize < tp->nvram_size) {
8367                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8368                         return;
8369
8370                 if (swab32(val) == TG3_EEPROM_MAGIC)
8371                         break;
8372
8373                 cursize <<= 1;
8374         }
8375
8376         tp->nvram_size = cursize;
8377 }
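/* Sizing example for the loop above, assuming a part smaller than the
 * EEPROM_CHIP_SIZE default: on a 16KB EEPROM the reads at 0x800, 0x1000
 * and 0x2000 return ordinary data, while the read at 0x4000 wraps back
 * to offset 0 and returns TG3_EEPROM_MAGIC, so cursize (0x4000 bytes)
 * is taken as the device size.
 */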
8378
8379 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8380 {
8381         u32 val;
8382
8383         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8384                 if (val != 0) {
8385                         tp->nvram_size = (val >> 16) * 1024;
8386                         return;
8387                 }
8388         }
8389         tp->nvram_size = 0x20000;
8390 }
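/* The word at NVRAM offset 0xf0 carries the total size in KB in its
 * upper 16 bits; if it reads as zero, or the read fails, the 128KB
 * (0x20000 byte) default above is used instead.
 */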
8391
8392 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8393 {
8394         u32 nvcfg1;
8395
8396         nvcfg1 = tr32(NVRAM_CFG1);
8397         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8398                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8399         }
8400         else {
8401                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8402                 tw32(NVRAM_CFG1, nvcfg1);
8403         }
8404
8405         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8406             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8407                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8408                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8409                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8410                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8411                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8412                                 break;
8413                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8414                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8415                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8416                                 break;
8417                         case FLASH_VENDOR_ATMEL_EEPROM:
8418                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8419                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8420                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8421                                 break;
8422                         case FLASH_VENDOR_ST:
8423                                 tp->nvram_jedecnum = JEDEC_ST;
8424                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8425                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8426                                 break;
8427                         case FLASH_VENDOR_SAIFUN:
8428                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8429                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8430                                 break;
8431                         case FLASH_VENDOR_SST_SMALL:
8432                         case FLASH_VENDOR_SST_LARGE:
8433                                 tp->nvram_jedecnum = JEDEC_SST;
8434                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8435                                 break;
8436                 }
8437         }
8438         else {
8439                 tp->nvram_jedecnum = JEDEC_ATMEL;
8440                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8441                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8442         }
8443 }
8444
8445 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8446 {
8447         u32 nvcfg1;
8448
8449         nvcfg1 = tr32(NVRAM_CFG1);
8450
8451         /* NVRAM protection for TPM */
8452         if (nvcfg1 & (1 << 27))
8453                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8454
8455         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8456                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8457                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8458                         tp->nvram_jedecnum = JEDEC_ATMEL;
8459                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8460                         break;
8461                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8462                         tp->nvram_jedecnum = JEDEC_ATMEL;
8463                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8464                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8465                         break;
8466                 case FLASH_5752VENDOR_ST_M45PE10:
8467                 case FLASH_5752VENDOR_ST_M45PE20:
8468                 case FLASH_5752VENDOR_ST_M45PE40:
8469                         tp->nvram_jedecnum = JEDEC_ST;
8470                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8471                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8472                         break;
8473         }
8474
8475         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8476                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8477                         case FLASH_5752PAGE_SIZE_256:
8478                                 tp->nvram_pagesize = 256;
8479                                 break;
8480                         case FLASH_5752PAGE_SIZE_512:
8481                                 tp->nvram_pagesize = 512;
8482                                 break;
8483                         case FLASH_5752PAGE_SIZE_1K:
8484                                 tp->nvram_pagesize = 1024;
8485                                 break;
8486                         case FLASH_5752PAGE_SIZE_2K:
8487                                 tp->nvram_pagesize = 2048;
8488                                 break;
8489                         case FLASH_5752PAGE_SIZE_4K:
8490                                 tp->nvram_pagesize = 4096;
8491                                 break;
8492                         case FLASH_5752PAGE_SIZE_264:
8493                                 tp->nvram_pagesize = 264;
8494                                 break;
8495                 }
8496         }
8497         else {
8498                 /* For eeprom, set pagesize to maximum eeprom size */
8499                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8500
8501                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8502                 tw32(NVRAM_CFG1, nvcfg1);
8503         }
8504 }
8505
8506 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8507 static void __devinit tg3_nvram_init(struct tg3 *tp)
8508 {
8509         int j;
8510
8511         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8512                 return;
8513
8514         tw32_f(GRC_EEPROM_ADDR,
8515              (EEPROM_ADDR_FSM_RESET |
8516               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8517                EEPROM_ADDR_CLKPERD_SHIFT)));
8518
8519         /* XXX schedule_timeout() ... */
8520         for (j = 0; j < 100; j++)
8521                 udelay(10);
8522
8523         /* Enable seeprom accesses. */
8524         tw32_f(GRC_LOCAL_CTRL,
8525              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8526         udelay(100);
8527
8528         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8529             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8530                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8531
8532                 tg3_enable_nvram_access(tp);
8533
8534                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8535                         tg3_get_5752_nvram_info(tp);
8536                 else
8537                         tg3_get_nvram_info(tp);
8538
8539                 tg3_get_nvram_size(tp);
8540
8541                 tg3_disable_nvram_access(tp);
8542
8543         } else {
8544                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8545
8546                 tg3_get_eeprom_size(tp);
8547         }
8548 }
8549
8550 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8551                                         u32 offset, u32 *val)
8552 {
8553         u32 tmp;
8554         int i;
8555
8556         if (offset > EEPROM_ADDR_ADDR_MASK ||
8557             (offset % 4) != 0)
8558                 return -EINVAL;
8559
8560         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8561                                         EEPROM_ADDR_DEVID_MASK |
8562                                         EEPROM_ADDR_READ);
8563         tw32(GRC_EEPROM_ADDR,
8564              tmp |
8565              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8566              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8567               EEPROM_ADDR_ADDR_MASK) |
8568              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8569
8570         for (i = 0; i < 10000; i++) {
8571                 tmp = tr32(GRC_EEPROM_ADDR);
8572
8573                 if (tmp & EEPROM_ADDR_COMPLETE)
8574                         break;
8575                 udelay(100);
8576         }
8577         if (!(tmp & EEPROM_ADDR_COMPLETE))
8578                 return -EBUSY;
8579
8580         *val = tr32(GRC_EEPROM_DATA);
8581         return 0;
8582 }
8583
8584 #define NVRAM_CMD_TIMEOUT 10000
8585
8586 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8587 {
8588         int i;
8589
8590         tw32(NVRAM_CMD, nvram_cmd);
8591         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8592                 udelay(10);
8593                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8594                         udelay(10);
8595                         break;
8596                 }
8597         }
8598         if (i == NVRAM_CMD_TIMEOUT) {
8599                 return -EBUSY;
8600         }
8601         return 0;
8602 }
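/* The polling loop above allows the controller roughly
 * NVRAM_CMD_TIMEOUT * 10us (about 100ms) to assert NVRAM_CMD_DONE
 * before the command is failed with -EBUSY.
 */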
8603
8604 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8605 {
8606         int ret;
8607
8608         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8609                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8610                 return -EINVAL;
8611         }
8612
8613         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8614                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8615
8616         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8617                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8618                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8619
8620                 offset = ((offset / tp->nvram_pagesize) <<
8621                           ATMEL_AT45DB0X1B_PAGE_POS) +
8622                         (offset % tp->nvram_pagesize);
8623         }
8624
8625         if (offset > NVRAM_ADDR_MSK)
8626                 return -EINVAL;
8627
8628         tg3_nvram_lock(tp);
8629
8630         tg3_enable_nvram_access(tp);
8631
8632         tw32(NVRAM_ADDR, offset);
8633         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8634                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8635
8636         if (ret == 0)
8637                 *val = swab32(tr32(NVRAM_RDDATA));
8638
8639         tg3_nvram_unlock(tp);
8640
8641         tg3_disable_nvram_access(tp);
8642
8643         return ret;
8644 }
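/* The address rewrite above converts a flat byte offset into the
 * page/byte format used by Atmel AT45DB-style buffered dataflash: the
 * page index is shifted up by ATMEL_AT45DB0X1B_PAGE_POS and the
 * remainder stays as the offset inside the page.  For instance, with a
 * 264-byte pagesize a flat offset of 300 becomes page 1, byte 36.
 */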
8645
8646 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8647                                     u32 offset, u32 len, u8 *buf)
8648 {
8649         int i, j, rc = 0;
8650         u32 val;
8651
8652         for (i = 0; i < len; i += 4) {
8653                 u32 addr, data;
8654
8655                 addr = offset + i;
8656
8657                 memcpy(&data, buf + i, 4);
8658
8659                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8660
8661                 val = tr32(GRC_EEPROM_ADDR);
8662                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8663
8664                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8665                         EEPROM_ADDR_READ);
8666                 tw32(GRC_EEPROM_ADDR, val |
8667                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8668                         (addr & EEPROM_ADDR_ADDR_MASK) |
8669                         EEPROM_ADDR_START |
8670                         EEPROM_ADDR_WRITE);
8671
8672                 for (j = 0; j < 10000; j++) {
8673                         val = tr32(GRC_EEPROM_ADDR);
8674
8675                         if (val & EEPROM_ADDR_COMPLETE)
8676                                 break;
8677                         udelay(100);
8678                 }
8679                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8680                         rc = -EBUSY;
8681                         break;
8682                 }
8683         }
8684
8685         return rc;
8686 }
8687
8688 /* offset and length are dword aligned */
8689 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8690                 u8 *buf)
8691 {
8692         int ret = 0;
8693         u32 pagesize = tp->nvram_pagesize;
8694         u32 pagemask = pagesize - 1;
8695         u32 nvram_cmd;
8696         u8 *tmp;
8697
8698         tmp = kmalloc(pagesize, GFP_KERNEL);
8699         if (tmp == NULL)
8700                 return -ENOMEM;
8701
8702         while (len) {
8703                 int j;
8704                 u32 phy_addr, page_off, size;
8705
8706                 phy_addr = offset & ~pagemask;
8707
8708                 for (j = 0; j < pagesize; j += 4) {
8709                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8710                                                 (u32 *) (tmp + j))))
8711                                 break;
8712                 }
8713                 if (ret)
8714                         break;
8715
8716                 page_off = offset & pagemask;
8717                 size = pagesize;
8718                 if (len < size)
8719                         size = len;
8720
8721                 len -= size;
8722
8723                 memcpy(tmp + page_off, buf, size);
8724
8725                 offset = offset + (pagesize - page_off);
8726
8727                 tg3_enable_nvram_access(tp);
8728
8729                 /*
8730                  * Before we can erase the flash page, we need
8731                  * to issue a special "write enable" command.
8732                  */
8733                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8734
8735                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8736                         break;
8737
8738                 /* Erase the target page */
8739                 tw32(NVRAM_ADDR, phy_addr);
8740
8741                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8742                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8743
8744                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8745                         break;
8746
8747                 /* Issue another write enable to start the write. */
8748                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8749
8750                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8751                         break;
8752
8753                 for (j = 0; j < pagesize; j += 4) {
8754                         u32 data;
8755
8756                         data = *((u32 *) (tmp + j));
8757                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8758
8759                         tw32(NVRAM_ADDR, phy_addr + j);
8760
8761                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8762                                 NVRAM_CMD_WR;
8763
8764                         if (j == 0)
8765                                 nvram_cmd |= NVRAM_CMD_FIRST;
8766                         else if (j == (pagesize - 4))
8767                                 nvram_cmd |= NVRAM_CMD_LAST;
8768
8769                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8770                                 break;
8771                 }
8772                 if (ret)
8773                         break;
8774         }
8775
8776         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8777         tg3_nvram_exec_cmd(tp, nvram_cmd);
8778
8779         kfree(tmp);
8780
8781         return ret;
8782 }
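/* Unbuffered flash writes above follow a read-modify-write sequence for
 * each page touched: the whole page is read into a scratch buffer, the
 * caller's data is merged in, then a write-enable, a page erase,
 * another write-enable and finally word-by-word programming with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST marking the page boundaries.
 */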
8783
8784 /* offset and length are dword aligned */
8785 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8786                 u8 *buf)
8787 {
8788         int i, ret = 0;
8789
8790         for (i = 0; i < len; i += 4, offset += 4) {
8791                 u32 data, page_off, phy_addr, nvram_cmd;
8792
8793                 memcpy(&data, buf + i, 4);
8794                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8795
8796                 page_off = offset % tp->nvram_pagesize;
8797
8798                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8799                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8800
8801                         phy_addr = ((offset / tp->nvram_pagesize) <<
8802                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8803                 }
8804                 else {
8805                         phy_addr = offset;
8806                 }
8807
8808                 tw32(NVRAM_ADDR, phy_addr);
8809
8810                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8811
8812                 if ((page_off == 0) || (i == 0))
8813                         nvram_cmd |= NVRAM_CMD_FIRST;
8814                 else if (page_off == (tp->nvram_pagesize - 4))
8815                         nvram_cmd |= NVRAM_CMD_LAST;
8816
8817                 if (i == (len - 4))
8818                         nvram_cmd |= NVRAM_CMD_LAST;
8819
8820                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8821                     (tp->nvram_jedecnum == JEDEC_ST) &&
8822                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8823
8824                         if ((ret = tg3_nvram_exec_cmd(tp,
8825                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8826                                 NVRAM_CMD_DONE)))
8827
8828                                 break;
8829                 }
8830                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8831                         /* We always do complete word writes to eeprom. */
8832                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8833                 }
8834
8835                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8836                         break;
8837         }
8838         return ret;
8839 }
8840
8841 /* offset and length are dword aligned */
8842 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8843 {
8844         int ret;
8845
8846         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8847                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8848                 return -EINVAL;
8849         }
8850
8851         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8852                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8853                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8854                 udelay(40);
8855         }
8856
8857         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8858                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8859         }
8860         else {
8861                 u32 grc_mode;
8862
8863                 tg3_nvram_lock(tp);
8864
8865                 tg3_enable_nvram_access(tp);
8866                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8867                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8868                         tw32(NVRAM_WRITE1, 0x406);
8869
8870                 grc_mode = tr32(GRC_MODE);
8871                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8872
8873                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8874                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8875
8876                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8877                                 buf);
8878                 }
8879                 else {
8880                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8881                                 buf);
8882                 }
8883
8884                 grc_mode = tr32(GRC_MODE);
8885                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8886
8887                 tg3_disable_nvram_access(tp);
8888                 tg3_nvram_unlock(tp);
8889         }
8890
8891         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8892                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8893                 udelay(40);
8894         }
8895
8896         return ret;
8897 }
8898
8899 struct subsys_tbl_ent {
8900         u16 subsys_vendor, subsys_devid;
8901         u32 phy_id;
8902 };
8903
8904 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8905         /* Broadcom boards. */
8906         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8907         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8908         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8909         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8910         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8911         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8912         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8913         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8914         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8915         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8916         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8917
8918         /* 3com boards. */
8919         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8920         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8921         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8922         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8923         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8924
8925         /* DELL boards. */
8926         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8927         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8928         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8929         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8930
8931         /* Compaq boards. */
8932         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8933         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8934         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8935         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8936         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8937
8938         /* IBM boards. */
8939         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8940 };
8941
8942 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8943 {
8944         int i;
8945
8946         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8947                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8948                      tp->pdev->subsystem_vendor) &&
8949                     (subsys_id_to_phy_id[i].subsys_devid ==
8950                      tp->pdev->subsystem_device))
8951                         return &subsys_id_to_phy_id[i];
8952         }
8953         return NULL;
8954 }
8955
8956 /* Since this function may be called in D3-hot power state during
8957  * tg3_init_one(), only config cycles are allowed.
8958  */
8959 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8960 {
8961         u32 val;
8962
8963         /* Make sure register accesses (indirect or otherwise)
8964          * will function correctly.
8965          */
8966         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8967                                tp->misc_host_ctrl);
8968
8969         tp->phy_id = PHY_ID_INVALID;
8970         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8971
8972         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8973         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8974                 u32 nic_cfg, led_cfg;
8975                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8976                 int eeprom_phy_serdes = 0;
8977
8978                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8979                 tp->nic_sram_data_cfg = nic_cfg;
8980
8981                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8982                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8983                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8984                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8985                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8986                     (ver > 0) && (ver < 0x100))
8987                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8988
8989                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8990                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8991                         eeprom_phy_serdes = 1;
8992
8993                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8994                 if (nic_phy_id != 0) {
8995                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8996                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8997
8998                         eeprom_phy_id  = (id1 >> 16) << 10;
8999                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9000                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9001                 } else
9002                         eeprom_phy_id = 0;
9003
9004                 tp->phy_id = eeprom_phy_id;
9005                 if (eeprom_phy_serdes) {
9006                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9007                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9008                         else
9009                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9010                 }
9011
9012                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9013                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9014                                     SHASTA_EXT_LED_MODE_MASK);
9015                 else
9016                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9017
9018                 switch (led_cfg) {
9019                 default:
9020                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9021                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9022                         break;
9023
9024                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9025                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9026                         break;
9027
9028                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9029                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9030
9031                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9032                          * read on some older 5700/5701 bootcode.
9033                          */
9034                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9035                             ASIC_REV_5700 ||
9036                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9037                             ASIC_REV_5701)
9038                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9039
9040                         break;
9041
9042                 case SHASTA_EXT_LED_SHARED:
9043                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9044                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9045                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9046                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9047                                                  LED_CTRL_MODE_PHY_2);
9048                         break;
9049
9050                 case SHASTA_EXT_LED_MAC:
9051                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9052                         break;
9053
9054                 case SHASTA_EXT_LED_COMBO:
9055                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9056                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9057                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9058                                                  LED_CTRL_MODE_PHY_2);
9059                         break;
9060
9061                 }
9062
9063                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9064                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9065                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9066                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9067
9068                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9069                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9070                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9071                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9072
9073                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9074                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9075                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9076                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9077                 }
9078                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9079                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9080
9081                 if (cfg2 & (1 << 17))
9082                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9083
9084                 /* serdes signal pre-emphasis in register 0x590 is set
9085                  * by the bootcode if bit 18 is set */
9086                 if (cfg2 & (1 << 18))
9087                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9088         }
9089 }
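/* The SRAM-provided PHY ID is repacked above into the same internal
 * format that tg3_phy_probe() builds from the MII PHYSID1/PHYSID2
 * registers: id1 supplies the high bits shifted left by 10, the top
 * six bits of id2 are folded in above them, and the low 10 bits of id2
 * (model and revision) stay in place, so either source can be masked
 * with PHY_ID_MASK and checked against the known PHY IDs.
 */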
9090
9091 static int __devinit tg3_phy_probe(struct tg3 *tp)
9092 {
9093         u32 hw_phy_id_1, hw_phy_id_2;
9094         u32 hw_phy_id, hw_phy_id_masked;
9095         int err;
9096
9097         /* Reading the PHY ID register can conflict with ASF
9098          * firwmare access to the PHY hardware.
9099          */
9100         err = 0;
9101         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9102                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9103         } else {
9104                 /* Now read the physical PHY_ID from the chip and verify
9105                  * that it is sane.  If it doesn't look good, we fall back
9106                  * to the PHY ID found in the eeprom area or, failing that,
9107                  * to the hard-coded subsystem ID table.
9108                  */
9109                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9110                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9111
9112                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9113                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9114                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9115
9116                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9117         }
9118
9119         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9120                 tp->phy_id = hw_phy_id;
9121                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9122                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9123                 else
9124                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9125         } else {
9126                 if (tp->phy_id != PHY_ID_INVALID) {
9127                         /* Do nothing, phy ID already set up in
9128                          * tg3_get_eeprom_hw_cfg().
9129                          */
9130                 } else {
9131                         struct subsys_tbl_ent *p;
9132
9133                         /* No eeprom signature?  Try the hardcoded
9134                          * subsys device table.
9135                          */
9136                         p = lookup_by_subsys(tp);
9137                         if (!p)
9138                                 return -ENODEV;
9139
9140                         tp->phy_id = p->phy_id;
9141                         if (!tp->phy_id ||
9142                             tp->phy_id == PHY_ID_BCM8002)
9143                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9144                 }
9145         }
9146
9147         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9148             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9149                 u32 bmsr, adv_reg, tg3_ctrl;
9150
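                     /* BMSR latches link-down events; it is read twice so that
                      * the second read reflects the current link state.
                      */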
9151                 tg3_readphy(tp, MII_BMSR, &bmsr);
9152                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9153                     (bmsr & BMSR_LSTATUS))
9154                         goto skip_phy_reset;
9155                     
9156                 err = tg3_phy_reset(tp);
9157                 if (err)
9158                         return err;
9159
9160                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9161                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9162                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9163                 tg3_ctrl = 0;
9164                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9165                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9166                                     MII_TG3_CTRL_ADV_1000_FULL);
9167                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9168                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9169                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9170                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9171                 }
9172
9173                 if (!tg3_copper_is_advertising_all(tp)) {
9174                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9175
9176                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9177                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9178
9179                         tg3_writephy(tp, MII_BMCR,
9180                                      BMCR_ANENABLE | BMCR_ANRESTART);
9181                 }
9182                 tg3_phy_set_wirespeed(tp);
9183
9184                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9185                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9186                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9187         }
9188
9189 skip_phy_reset:
9190         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9191                 err = tg3_init_5401phy_dsp(tp);
9192                 if (err)
9193                         return err;
9194         }
9195
9196         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9197                 err = tg3_init_5401phy_dsp(tp);
9198         }
9199
9200         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9201                 tp->link_config.advertising =
9202                         (ADVERTISED_1000baseT_Half |
9203                          ADVERTISED_1000baseT_Full |
9204                          ADVERTISED_Autoneg |
9205                          ADVERTISED_FIBRE);
9206         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9207                 tp->link_config.advertising &=
9208                         ~(ADVERTISED_1000baseT_Half |
9209                           ADVERTISED_1000baseT_Full);
9210
9211         return err;
9212 }
9213
9214 static void __devinit tg3_read_partno(struct tg3 *tp)
9215 {
9216         unsigned char vpd_data[256];
9217         int i;
9218
9219         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9220                 /* Sun decided not to put the necessary bits in the
9221                  * NVRAM of their onboard tg3 parts :(
9222                  */
9223                 strcpy(tp->board_part_number, "Sun 570X");
9224                 return;
9225         }
9226
9227         for (i = 0; i < 256; i += 4) {
9228                 u32 tmp;
9229
9230                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9231                         goto out_not_found;
9232
9233                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9234                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9235                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9236                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9237         }
9238
9239         /* Now parse and find the part number. */
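             /* The data is laid out as PCI VPD large resource descriptors: a
              * 3-byte header (tag byte plus 16-bit little-endian length)
              * followed by the payload.  Tags 0x82 (identifier string) and
              * 0x91 (VPD-W) are skipped; tag 0x90 (VPD-R) is scanned for the
              * "PN" keyword, where each keyword entry is two name bytes, a
              * one-byte length and then the data.
              */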
9240         for (i = 0; i < 256; ) {
9241                 unsigned char val = vpd_data[i];
9242                 int block_end;
9243
9244                 if (val == 0x82 || val == 0x91) {
9245                         i = (i + 3 +
9246                              (vpd_data[i + 1] +
9247                               (vpd_data[i + 2] << 8)));
9248                         continue;
9249                 }
9250
9251                 if (val != 0x90)
9252                         goto out_not_found;
9253
9254                 block_end = (i + 3 +
9255                              (vpd_data[i + 1] +
9256                               (vpd_data[i + 2] << 8)));
9257                 i += 3;
9258                 while (i < block_end) {
9259                         if (vpd_data[i + 0] == 'P' &&
9260                             vpd_data[i + 1] == 'N') {
9261                                 int partno_len = vpd_data[i + 2];
9262
9263                                 if (partno_len > 24)
9264                                         goto out_not_found;
9265
9266                                 memcpy(tp->board_part_number,
9267                                        &vpd_data[i + 3],
9268                                        partno_len);
9269
9270                                 /* Success. */
9271                                 return;
9272                         }
                             /* Skip to the next keyword entry: two name bytes,
                              * a one-byte length, then the data itself.
                              */
                             i += 3 + vpd_data[i + 2];
9273                 }
9274
9275                 /* Part number not found. */
9276                 goto out_not_found;
9277         }
9278
9279 out_not_found:
9280         strcpy(tp->board_part_number, "none");
9281 }
9282
9283 #ifdef CONFIG_SPARC64
9284 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9285 {
9286         struct pci_dev *pdev = tp->pdev;
9287         struct pcidev_cookie *pcp = pdev->sysdata;
9288
9289         if (pcp != NULL) {
9290                 int node = pcp->prom_node;
9291                 u32 venid;
9292                 int err;
9293
9294                 err = prom_getproperty(node, "subsystem-vendor-id",
9295                                        (char *) &venid, sizeof(venid));
9296                 if (err == 0 || err == -1)
9297                         return 0;
9298                 if (venid == PCI_VENDOR_ID_SUN)
9299                         return 1;
9300         }
9301         return 0;
9302 }
9303 #endif
9304
9305 static int __devinit tg3_get_invariants(struct tg3 *tp)
9306 {
9307         static struct pci_device_id write_reorder_chipsets[] = {
9308                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9309                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9310                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9311                              PCI_DEVICE_ID_VIA_8385_0) },
9312                 { },
9313         };
9314         u32 misc_ctrl_reg;
9315         u32 cacheline_sz_reg;
9316         u32 pci_state_reg, grc_misc_cfg;
9317         u32 val;
9318         u16 pci_cmd;
9319         int err;
9320
9321 #ifdef CONFIG_SPARC64
9322         if (tg3_is_sun_570X(tp))
9323                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9324 #endif
9325
9326         /* Force memory write invalidate off.  If we leave it on,
9327          * then on 5700_BX chips we have to enable a workaround.
9328          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9329          * to match the cacheline size.  The Broadcom driver has this
9330          * workaround but turns MWI off all the time and so never uses
9331          * it.  This seems to suggest that the workaround is insufficient.
9332          */
9333         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9334         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9335         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9336
9337         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9338          * has the register indirect write enable bit set before
9339          * we try to access any of the MMIO registers.  It is also
9340          * critical that the PCI-X hw workaround situation is decided
9341          * before that as well.
9342          */
9343         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9344                               &misc_ctrl_reg);
9345
9346         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9347                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9348
9349         /* Wrong chip ID in 5752 A0. This code can be removed later
9350          * as A0 is not in production.
9351          */
9352         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9353                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9354
9355         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9356          * we need to disable memory and use config. cycles
9357          * only to access all registers. The 5702/03 chips
9358          * can mistakenly decode the special cycles from the
9359          * ICH chipsets as memory write cycles, causing corruption
9360          * of register and memory space. Only certain ICH bridges
9361          * will drive special cycles with non-zero data during the
9362          * address phase which can fall within the 5703's address
9363          * range. This is not an ICH bug as the PCI spec allows
9364          * non-zero address during special cycles. However, only
9365          * these ICH bridges are known to drive non-zero addresses
9366          * during special cycles.
9367          *
9368          * Since special cycles do not cross PCI bridges, we only
9369          * enable this workaround if the 5703 is on the secondary
9370          * bus of these ICH bridges.
9371          */
9372         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9373             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9374                 static struct tg3_dev_id {
9375                         u32     vendor;
9376                         u32     device;
9377                         u32     rev;
9378                 } ich_chipsets[] = {
9379                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9380                           PCI_ANY_ID },
9381                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9382                           PCI_ANY_ID },
9383                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9384                           0xa },
9385                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9386                           PCI_ANY_ID },
9387                         { },
9388                 };
9389                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9390                 struct pci_dev *bridge = NULL;
9391
9392                 while (pci_id->vendor != 0) {
9393                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9394                                                 bridge);
9395                         if (!bridge) {
9396                                 pci_id++;
9397                                 continue;
9398                         }
9399                         if (pci_id->rev != PCI_ANY_ID) {
9400                                 u8 rev;
9401
9402                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9403                                                      &rev);
9404                                 if (rev > pci_id->rev)
9405                                         continue;
9406                         }
9407                         if (bridge->subordinate &&
9408                             (bridge->subordinate->number ==
9409                              tp->pdev->bus->number)) {
9410
9411                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9412                                 pci_dev_put(bridge);
9413                                 break;
9414                         }
9415                 }
9416         }
9417
9418         /* Find msi capability. */
9419         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9420             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9421                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9422                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9423         }
9424
9425         /* Initialize misc host control in PCI block. */
9426         tp->misc_host_ctrl |= (misc_ctrl_reg &
9427                                MISC_HOST_CTRL_CHIPREV);
9428         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9429                                tp->misc_host_ctrl);
9430
9431         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9432                               &cacheline_sz_reg);
9433
9434         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9435         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9436         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9437         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9438
9439         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9440             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9441             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9442                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9443
9444         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9445             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9446                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9447
9448         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9449                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9450
9451         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9452             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9453             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9454                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9455
9456         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9457                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9458
9459         /* If we have an AMD 762 or VIA K8T800 chipset, write
9460          * reordering to the mailbox registers done by the host
9461          * controller can cause major troubles.  We read back from
9462          * every mailbox register write to force the writes to be
9463          * posted to the chip in order.
9464          */
9465         if (pci_dev_present(write_reorder_chipsets) &&
9466             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9467                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
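             /* With this flag set, every mailbox write is immediately followed
              * by a read of the same register, e.g. (sketch of what
              * tg3_write_flush_reg32() does):
              *
              *      writel(val, tp->regs + off);
              *      readl(tp->regs + off);
              */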
9468
9469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9470             tp->pci_lat_timer < 64) {
9471                 tp->pci_lat_timer = 64;
9472
9473                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9474                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9475                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9476                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9477
9478                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9479                                        cacheline_sz_reg);
9480         }
9481
9482         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9483                               &pci_state_reg);
9484
9485         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9486                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9487
9488                 /* If this is a 5700 BX chipset, and we are in PCI-X
9489                  * mode, enable register write workaround.
9490                  *
9491                  * The workaround is to use indirect register accesses
9492                  * for all chip writes not to mailbox registers.
9493                  */
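                     /* Roughly, an indirect write goes through PCI config space
                      * instead of MMIO (sketch of tg3_write_indirect_reg32(),
                      * which holds tp->indirect_lock around the two accesses):
                      *
                      *      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                      *      pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                      */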
9494                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9495                         u32 pm_reg;
9496                         u16 pci_cmd;
9497
9498                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9499
9500                         /* The chip can have its power management PCI config
9501                          * space registers clobbered due to this bug.
9502                          * So explicitly force the chip into D0 here.
9503                          */
9504                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9505                                               &pm_reg);
9506                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9507                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9508                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9509                                                pm_reg);
9510
9511                         /* Also, force SERR#/PERR# in PCI command. */
9512                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9513                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9514                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9515                 }
9516         }
9517
9518         /* 5700 BX chips need to have their TX producer index mailboxes
9519          * written twice to workaround a bug.
9520          */
9521         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9522                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
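             /* Roughly, tg3_write32_tx_mbox() writes the producer index, writes
              * it a second time when TXD_MBOX_HWBUG is set, and reads it back
              * when MBOX_WRITE_REORDER is set.
              */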
9523
9524         /* Back to back register writes can cause problems on this chip,
9525          * the workaround is to read back all reg writes except those to
9526          * mailbox regs.  See tg3_write_indirect_reg32().
9527          *
9528          * PCI Express 5750_A0 rev chips need this workaround too.
9529          */
9530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9531             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9532              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9533                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9534
9535         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9536                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9537         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9538                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9539
9540         /* Chip-specific fixup from Broadcom driver */
9541         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9542             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9543                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9544                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9545         }
9546
9547         /* Default fast path register access methods */
9548         tp->read32 = tg3_read32;
9549         tp->write32 = tg3_write32;
9550         tp->read32_mbox = tg3_read32;
9551         tp->write32_mbox = tg3_write32;
9552         tp->write32_tx_mbox = tg3_write32;
9553         tp->write32_rx_mbox = tg3_write32;
9554
9555         /* Various workaround register access methods */
9556         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9557                 tp->write32 = tg3_write_indirect_reg32;
9558         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9559                 tp->write32 = tg3_write_flush_reg32;
9560
9561         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9562             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9563                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9564                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9565                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9566         }
9567
9568         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9569                 tp->read32 = tg3_read_indirect_reg32;
9570                 tp->write32 = tg3_write_indirect_reg32;
9571                 tp->read32_mbox = tg3_read_indirect_mbox;
9572                 tp->write32_mbox = tg3_write_indirect_mbox;
9573                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9574                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9575
9576                 iounmap(tp->regs);
9577                 tp->regs = NULL;
9578
9579                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9580                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9581                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9582         }
9583
9584         /* Get eeprom hw config before calling tg3_set_power_state().
9585          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9586          * determined before calling tg3_set_power_state() so that
9587          * we know whether or not to switch out of Vaux power.
9588          * When the flag is set, it means that GPIO1 is used for eeprom
9589          * write protect and also implies that it is a LOM where GPIOs
9590          * are not used to switch power.
9591          */ 
9592         tg3_get_eeprom_hw_cfg(tp);
9593
9594         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9595          * GPIO1 driven high will bring 5700's external PHY out of reset.
9596          * It is also used as eeprom write protect on LOMs.
9597          */
9598         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9599         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9600             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9601                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9602                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9603         /* Unused GPIO3 must be driven as output on 5752 because there
9604          * are no pull-up resistors on unused GPIO pins.
9605          */
9606         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9607                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9608
9609         /* Force the chip into D0. */
9610         err = tg3_set_power_state(tp, 0);
9611         if (err) {
9612                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9613                        pci_name(tp->pdev));
9614                 return err;
9615         }
9616
9617         /* 5700 B0 chips do not support checksumming correctly due
9618          * to hardware bugs.
9619          */
9620         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9621                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9622
9623         /* Pseudo-header checksum is done by hardware logic and not
9624          * the offload processors, so make the chip do the pseudo-
9625          * header checksums on receive.  For transmit it is more
9626          * convenient to do the pseudo-header checksum in software
9627          * as Linux does that on transmit for us in all cases.
9628          */
9629         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9630         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9631
9632         /* Derive initial jumbo mode from MTU assigned in
9633          * ether_setup() via the alloc_etherdev() call
9634          */
9635         if (tp->dev->mtu > ETH_DATA_LEN &&
9636             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9637                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9638
9639         /* Determine WakeOnLan speed to use. */
9640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9641             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9642             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9643             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9644                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9645         } else {
9646                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9647         }
9648
9649         /* A few boards don't want Ethernet@WireSpeed phy feature */
9650         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9651             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9652              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9653              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9654             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9655                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9656
9657         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9658             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9659                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9660         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9661                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9662
9663         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9664                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9665
9666         tp->coalesce_mode = 0;
9667         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9668             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9669                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9670
9671         /* Initialize MAC MI mode, polling disabled. */
9672         tw32_f(MAC_MI_MODE, tp->mi_mode);
9673         udelay(80);
9674
9675         /* Initialize data/descriptor byte/word swapping. */
9676         val = tr32(GRC_MODE);
9677         val &= GRC_MODE_HOST_STACKUP;
9678         tw32(GRC_MODE, val | tp->grc_mode);
9679
9680         tg3_switch_clocks(tp);
9681
9682         /* Clear this out for sanity. */
9683         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9684
9685         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9686                               &pci_state_reg);
9687         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9688             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9689                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9690
9691                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9692                     chiprevid == CHIPREV_ID_5701_B0 ||
9693                     chiprevid == CHIPREV_ID_5701_B2 ||
9694                     chiprevid == CHIPREV_ID_5701_B5) {
9695                         void __iomem *sram_base;
9696
9697                         /* Write some dummy words into the SRAM status block
9698                          * area, see if it reads back correctly.  If the return
9699                          * value is bad, force enable the PCIX workaround.
9700                          */
9701                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9702
9703                         writel(0x00000000, sram_base);
9704                         writel(0x00000000, sram_base + 4);
9705                         writel(0xffffffff, sram_base + 4);
9706                         if (readl(sram_base) != 0x00000000)
9707                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9708                 }
9709         }
9710
9711         udelay(50);
9712         tg3_nvram_init(tp);
9713
9714         grc_misc_cfg = tr32(GRC_MISC_CFG);
9715         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9716
9717         /* Broadcom's driver says that CIOBE multisplit has a bug */
9718 #if 0
9719         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9720             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9721                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9722                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9723         }
9724 #endif
9725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9726             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9727              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9728                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9729
9730         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9731             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9732                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9733         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9734                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9735                                       HOSTCC_MODE_CLRTICK_TXBD);
9736
9737                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9738                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9739                                        tp->misc_host_ctrl);
9740         }
9741
9742         /* these are limited to 10/100 only */
9743         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9744              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9745             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9746              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9747              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9748               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9749               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9750             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9751              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9752               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9753                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9754
9755         err = tg3_phy_probe(tp);
9756         if (err) {
9757                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9758                        pci_name(tp->pdev), err);
9759                 /* ... but do not return immediately ... */
9760         }
9761
9762         tg3_read_partno(tp);
9763
9764         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9765                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9766         } else {
9767                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9768                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9769                 else
9770                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9771         }
9772
9773         /* 5700 {AX,BX} chips have a broken status block link
9774          * change bit implementation, so we must use the
9775          * status register in those cases.
9776          */
9777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9778                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9779         else
9780                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9781
9782         /* The led_ctrl is set during tg3_phy_probe, here we might
9783          * have to force the link status polling mechanism based
9784          * upon subsystem IDs.
9785          */
9786         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9787             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9788                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9789                                   TG3_FLAG_USE_LINKCHG_REG);
9790         }
9791
9792         /* For all SERDES we poll the MAC status register. */
9793         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9794                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9795         else
9796                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9797
9798         /* It seems all chips can get confused if TX buffers
9799          * straddle the 4GB address boundary in some cases.
9800          */
9801         tp->dev->hard_start_xmit = tg3_start_xmit;
9802
9803         tp->rx_offset = 2;
9804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9805             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9806                 tp->rx_offset = 0;
9807
9808         /* By default, disable wake-on-lan.  User can change this
9809          * using ETHTOOL_SWOL.
9810          */
9811         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
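             /* (e.g. "ethtool -s ethX wol g" can re-enable magic-packet WOL
              *  at runtime.)
              */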
9812
9813         return err;
9814 }
9815
9816 #ifdef CONFIG_SPARC64
9817 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9818 {
9819         struct net_device *dev = tp->dev;
9820         struct pci_dev *pdev = tp->pdev;
9821         struct pcidev_cookie *pcp = pdev->sysdata;
9822
9823         if (pcp != NULL) {
9824                 int node = pcp->prom_node;
9825
9826                 if (prom_getproplen(node, "local-mac-address") == 6) {
9827                         prom_getproperty(node, "local-mac-address",
9828                                          dev->dev_addr, 6);
9829                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9830                         return 0;
9831                 }
9832         }
9833         return -ENODEV;
9834 }
9835
9836 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9837 {
9838         struct net_device *dev = tp->dev;
9839
9840         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9841         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9842         return 0;
9843 }
9844 #endif
9845
9846 static int __devinit tg3_get_device_address(struct tg3 *tp)
9847 {
9848         struct net_device *dev = tp->dev;
9849         u32 hi, lo, mac_offset;
9850
9851 #ifdef CONFIG_SPARC64
9852         if (!tg3_get_macaddr_sparc(tp))
9853                 return 0;
9854 #endif
9855
9856         mac_offset = 0x7c;
9857         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9858              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9859             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9860                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9861                         mac_offset = 0xcc;
9862                 if (tg3_nvram_lock(tp))
9863                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9864                 else
9865                         tg3_nvram_unlock(tp);
9866         }
9867
9868         /* First try to get it from MAC address mailbox. */
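             /* 0x484b is ASCII "HK", apparently written by the bootcode as a
              * signature marking a valid address in the mailbox.
              */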
9869         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9870         if ((hi >> 16) == 0x484b) {
9871                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9872                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9873
9874                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9875                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9876                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9877                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9878                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9879         }
9880         /* Next, try NVRAM. */
9881         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9882                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9883                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9884                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9885                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9886                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9887                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9888                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9889                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9890         }
9891         /* Finally just fetch it out of the MAC control regs. */
9892         else {
9893                 hi = tr32(MAC_ADDR_0_HIGH);
9894                 lo = tr32(MAC_ADDR_0_LOW);
9895
9896                 dev->dev_addr[5] = lo & 0xff;
9897                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9898                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9899                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9900                 dev->dev_addr[1] = hi & 0xff;
9901                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9902         }
9903
9904         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9905 #ifdef CONFIG_SPARC64
9906                 if (!tg3_get_default_macaddr_sparc(tp))
9907                         return 0;
9908 #endif
9909                 return -EINVAL;
9910         }
9911         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9912         return 0;
9913 }
9914
9915 #define BOUNDARY_SINGLE_CACHELINE       1
9916 #define BOUNDARY_MULTI_CACHELINE        2
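/* BOUNDARY_SINGLE_CACHELINE asks for DMA bursts to be broken at every
 * cacheline; BOUNDARY_MULTI_CACHELINE allows bursts to span roughly a few
 * cachelines before being broken.
 */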
9917
9918 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9919 {
9920         int cacheline_size;
9921         u8 byte;
9922         int goal;
9923
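             /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero means the
              * register was never programmed, so fall back to 1024 bytes (the
              * largest boundary handled below).
              */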
9924         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9925         if (byte == 0)
9926                 cacheline_size = 1024;
9927         else
9928                 cacheline_size = (int) byte * 4;
9929
9930         /* On 5703 and later chips, the boundary bits have no
9931          * effect.
9932          */
9933         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9934             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9935             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9936                 goto out;
9937
9938 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9939         goal = BOUNDARY_MULTI_CACHELINE;
9940 #else
9941 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9942         goal = BOUNDARY_SINGLE_CACHELINE;
9943 #else
9944         goal = 0;
9945 #endif
9946 #endif
9947
9948         if (!goal)
9949                 goto out;
9950
9951         /* PCI controllers on most RISC systems tend to disconnect
9952          * when a device tries to burst across a cache-line boundary.
9953          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9954          *
9955          * Unfortunately, for PCI-E there are only limited
9956          * write-side controls for this, and thus for reads
9957          * we will still get the disconnects.  We'll also waste
9958          * these PCI cycles for both read and write for chips
9959          * other than 5700 and 5701 which do not implement the
9960          * boundary bits.
9961          */
9962         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9963             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9964                 switch (cacheline_size) {
9965                 case 16:
9966                 case 32:
9967                 case 64:
9968                 case 128:
9969                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9970                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9971                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9972                         } else {
9973                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9974                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9975                         }
9976                         break;
9977
9978                 case 256:
9979                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9980                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9981                         break;
9982
9983                 default:
9984                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9985                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9986                         break;
9987                 };
9988         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9989                 switch (cacheline_size) {
9990                 case 16:
9991                 case 32:
9992                 case 64:
9993                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9994                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9995                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9996                                 break;
9997                         }
9998                         /* fallthrough */
9999                 case 128:
10000                 default:
10001                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10002                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10003                         break;
10004                 };
10005         } else {
10006                 switch (cacheline_size) {
10007                 case 16:
10008                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10009                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10010                                         DMA_RWCTRL_WRITE_BNDRY_16);
10011                                 break;
10012                         }
10013                         /* fallthrough */
10014                 case 32:
10015                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10016                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10017                                         DMA_RWCTRL_WRITE_BNDRY_32);
10018                                 break;
10019                         }
10020                         /* fallthrough */
10021                 case 64:
10022                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10023                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10024                                         DMA_RWCTRL_WRITE_BNDRY_64);
10025                                 break;
10026                         }
10027                         /* fallthrough */
10028                 case 128:
10029                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10030                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10031                                         DMA_RWCTRL_WRITE_BNDRY_128);
10032                                 break;
10033                         }
10034                         /* fallthrough */
10035                 case 256:
10036                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10037                                 DMA_RWCTRL_WRITE_BNDRY_256);
10038                         break;
10039                 case 512:
10040                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10041                                 DMA_RWCTRL_WRITE_BNDRY_512);
10042                         break;
10043                 case 1024:
10044                 default:
10045                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10046                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10047                         break;
10048                 };
10049         }
10050
10051 out:
10052         return val;
10053 }
10054
10055 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10056 {
10057         struct tg3_internal_buffer_desc test_desc;
10058         u32 sram_dma_descs;
10059         int i, ret;
10060
10061         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10062
10063         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10064         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10065         tw32(RDMAC_STATUS, 0);
10066         tw32(WDMAC_STATUS, 0);
10067
10068         tw32(BUFMGR_MODE, 0);
10069         tw32(FTQ_RESET, 0);
10070
10071         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10072         test_desc.addr_lo = buf_dma & 0xffffffff;
10073         test_desc.nic_mbuf = 0x00002100;
10074         test_desc.len = size;
10075
10076         /*
10077          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10078          * the *second* time the tg3 driver was getting loaded after an
10079          * initial scan.
10080          *
10081          * Broadcom tells me:
10082          *   ...the DMA engine is connected to the GRC block and a DMA
10083          *   reset may affect the GRC block in some unpredictable way...
10084          *   The behavior of resets to individual blocks has not been tested.
10085          *
10086          * Broadcom noted the GRC reset will also reset all sub-components.
10087          */
10088         if (to_device) {
10089                 test_desc.cqid_sqid = (13 << 8) | 2;
10090
10091                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10092                 udelay(40);
10093         } else {
10094                 test_desc.cqid_sqid = (16 << 8) | 7;
10095
10096                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10097                 udelay(40);
10098         }
10099         test_desc.flags = 0x00000005;
10100
10101         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10102                 u32 val;
10103
10104                 val = *(((u32 *)&test_desc) + i);
10105                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10106                                        sram_dma_descs + (i * sizeof(u32)));
10107                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10108         }
10109         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10110
10111         if (to_device) {
10112                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10113         } else {
10114                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10115         }
10116
10117         ret = -ENODEV;
10118         for (i = 0; i < 40; i++) {
10119                 u32 val;
10120
10121                 if (to_device)
10122                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10123                 else
10124                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10125                 if ((val & 0xffff) == sram_dma_descs) {
10126                         ret = 0;
10127                         break;
10128                 }
10129
10130                 udelay(100);
10131         }
10132
10133         return ret;
10134 }
10135
10136 #define TEST_BUFFER_SIZE        0x2000
10137
10138 static int __devinit tg3_test_dma(struct tg3 *tp)
10139 {
10140         dma_addr_t buf_dma;
10141         u32 *buf, saved_dma_rwctrl;
10142         int ret;
10143
10144         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10145         if (!buf) {
10146                 ret = -ENOMEM;
10147                 goto out_nofree;
10148         }
10149
10150         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10151                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10152
10153         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10154
10155         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10156                 /* DMA read watermark not used on PCIE */
10157                 tp->dma_rwctrl |= 0x00180000;
10158         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10159                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10160                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10161                         tp->dma_rwctrl |= 0x003f0000;
10162                 else
10163                         tp->dma_rwctrl |= 0x003f000f;
10164         } else {
10165                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10166                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10167                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10168
10169                         if (ccval == 0x6 || ccval == 0x7)
10170                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10171
10172                         /* Set bit 23 to enable PCIX hw bug fix */
10173                         tp->dma_rwctrl |= 0x009f0000;
10174                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10175                         /* 5780 always in PCIX mode */
10176                         tp->dma_rwctrl |= 0x00144000;
10177                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10178                         /* 5714 always in PCIX mode */
10179                         tp->dma_rwctrl |= 0x00148000;
10180                 } else {
10181                         tp->dma_rwctrl |= 0x001b000f;
10182                 }
10183         }
10184
10185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10187                 tp->dma_rwctrl &= 0xfffffff0;
10188
10189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10191                 /* Remove this if it causes problems for some boards. */
10192                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10193
10194                 /* On 5700/5701 chips, we need to set this bit.
10195                  * Otherwise the chip will issue cacheline transactions
10196                  * to streamable DMA memory with not all the byte
10197                  * enables turned on.  This is an error on several
10198                  * RISC PCI controllers, in particular sparc64.
10199                  *
10200                  * On 5703/5704 chips, this bit has been reassigned
10201                  * a different meaning.  In particular, it is used
10202                  * on those chips to enable a PCI-X workaround.
10203                  */
10204                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10205         }
10206
10207         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10208
10209 #if 0
10210         /* Unneeded, already done by tg3_get_invariants.  */
10211         tg3_switch_clocks(tp);
10212 #endif
10213
10214         ret = 0;
10215         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10216             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10217                 goto out;
10218
10219         /* It is best to perform DMA test with maximum write burst size
10220          * to expose the 5700/5701 write DMA bug.
10221          */
10222         saved_dma_rwctrl = tp->dma_rwctrl;
10223         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10224         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10225
10226         while (1) {
10227                 u32 *p = buf, i;
10228
10229                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10230                         p[i] = i;
10231
10232                 /* Send the buffer to the chip. */
10233                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10234                 if (ret) {
10235                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10236                         break;
10237                 }
10238
10239 #if 0
10240                 /* validate data reached card RAM correctly. */
10241                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10242                         u32 val;
10243                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10244                         if (le32_to_cpu(val) != p[i]) {
10245                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10246                                 /* ret = -ENODEV here? */
10247                         }
10248                         p[i] = 0;
10249                 }
10250 #endif
10251                 /* Now read it back. */
10252                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10253                 if (ret) {
10254                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10255
10256                         break;
10257                 }
10258
10259                 /* Verify it. */
10260                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10261                         if (p[i] == i)
10262                                 continue;
10263
10264                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10265                             DMA_RWCTRL_WRITE_BNDRY_16) {
10266                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10267                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10268                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10269                                 break;
10270                         } else {
10271                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10272                                 ret = -ENODEV;
10273                                 goto out;
10274                         }
10275                 }
10276
10277                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10278                         /* Success. */
10279                         ret = 0;
10280                         break;
10281                 }
10282         }
10283         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10284             DMA_RWCTRL_WRITE_BNDRY_16) {
10285                 static struct pci_device_id dma_wait_state_chipsets[] = {
10286                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10287                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10288                         { },
10289                 };
10290
10291                 /* DMA test passed without adjusting DMA boundary,
10292                  * now look for chipsets that are known to expose the
10293                  * DMA bug without failing the test.
10294                  */
10295                 if (pci_dev_present(dma_wait_state_chipsets)) {
10296                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10297                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10298                 }
10299                 else
10300                         /* Safe to use the calculated DMA boundary. */
10301                         tp->dma_rwctrl = saved_dma_rwctrl;
10302
10303                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10304         }
10305
10306 out:
10307         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10308 out_nofree:
10309         return ret;
10310 }
10311
10312 static void __devinit tg3_init_link_config(struct tg3 *tp)
10313 {
10314         tp->link_config.advertising =
10315                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10316                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10317                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10318                  ADVERTISED_Autoneg | ADVERTISED_MII);
10319         tp->link_config.speed = SPEED_INVALID;
10320         tp->link_config.duplex = DUPLEX_INVALID;
10321         tp->link_config.autoneg = AUTONEG_ENABLE;
10322         netif_carrier_off(tp->dev);
10323         tp->link_config.active_speed = SPEED_INVALID;
10324         tp->link_config.active_duplex = DUPLEX_INVALID;
10325         tp->link_config.phy_is_low_power = 0;
10326         tp->link_config.orig_speed = SPEED_INVALID;
10327         tp->link_config.orig_duplex = DUPLEX_INVALID;
10328         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10329 }
10330
10331 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10332 {
10333         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10334                 tp->bufmgr_config.mbuf_read_dma_low_water =
10335                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10336                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10337                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10338                 tp->bufmgr_config.mbuf_high_water =
10339                         DEFAULT_MB_HIGH_WATER_5705;
10340
10341                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10342                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10343                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10344                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10345                 tp->bufmgr_config.mbuf_high_water_jumbo =
10346                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10347         } else {
10348                 tp->bufmgr_config.mbuf_read_dma_low_water =
10349                         DEFAULT_MB_RDMA_LOW_WATER;
10350                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10351                         DEFAULT_MB_MACRX_LOW_WATER;
10352                 tp->bufmgr_config.mbuf_high_water =
10353                         DEFAULT_MB_HIGH_WATER;
10354
10355                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10356                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10357                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10358                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10359                 tp->bufmgr_config.mbuf_high_water_jumbo =
10360                         DEFAULT_MB_HIGH_WATER_JUMBO;
10361         }
10362
10363         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10364         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10365 }
10366
10367 static char * __devinit tg3_phy_string(struct tg3 *tp)
10368 {
10369         switch (tp->phy_id & PHY_ID_MASK) {
10370         case PHY_ID_BCM5400:    return "5400";
10371         case PHY_ID_BCM5401:    return "5401";
10372         case PHY_ID_BCM5411:    return "5411";
10373         case PHY_ID_BCM5701:    return "5701";
10374         case PHY_ID_BCM5703:    return "5703";
10375         case PHY_ID_BCM5704:    return "5704";
10376         case PHY_ID_BCM5705:    return "5705";
10377         case PHY_ID_BCM5750:    return "5750";
10378         case PHY_ID_BCM5752:    return "5752";
10379         case PHY_ID_BCM5714:    return "5714";
10380         case PHY_ID_BCM5780:    return "5780";
10381         case PHY_ID_BCM8002:    return "8002/serdes";
10382         case 0:                 return "serdes";
10383         default:                return "unknown";
10384         }
10385 }
10386
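/* Build a human-readable description of the host bus (PCI, PCI-X or
 * PCI Express, plus clock speed and bus width) for the probe banner.
 */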
10387 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10388 {
10389         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10390                 strcpy(str, "PCI Express");
10391                 return str;
10392         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10393                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10394
10395                 strcpy(str, "PCIX:");
10396
10397                 if ((clock_ctrl == 7) ||
10398                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10399                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10400                         strcat(str, "133MHz");
10401                 else if (clock_ctrl == 0)
10402                         strcat(str, "33MHz");
10403                 else if (clock_ctrl == 2)
10404                         strcat(str, "50MHz");
10405                 else if (clock_ctrl == 4)
10406                         strcat(str, "66MHz");
10407                 else if (clock_ctrl == 6)
10408                         strcat(str, "100MHz");
10411         } else {
10412                 strcpy(str, "PCI:");
10413                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10414                         strcat(str, "66MHz");
10415                 else
10416                         strcat(str, "33MHz");
10417         }
10418         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10419                 strcat(str, ":32-bit");
10420         else
10421                 strcat(str, ":64-bit");
10422         return str;
10423 }
10424
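/* The 5704 is a dual-port part whose two ports appear as separate PCI
 * functions in the same slot.  Walk the slot to find the sibling
 * function so it can be recorded in tp->pdev_peer.
 */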
10425 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10426 {
10427         struct pci_dev *peer;
10428         unsigned int func, devnr = tp->pdev->devfn & ~7;
10429
10430         for (func = 0; func < 8; func++) {
10431                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10432                 if (peer && peer != tp->pdev)
10433                         break;
10434                 pci_dev_put(peer);
10435         }
10436         if (!peer || peer == tp->pdev)
10437                 BUG();
10438
10439         /*
10440          * We don't need to keep the refcount elevated; there's no way
10441          * to remove one half of this device without removing the other.
10442          */
10443         pci_dev_put(peer);
10444
10445         return peer;
10446 }
10447
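/* Seed the default interrupt coalescing parameters reported through
 * ethtool.  5705+ parts have their IRQ-time and statistics-block
 * coalescing values forced to zero below, presumably because the
 * hardware does not implement those knobs.
 */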
10448 static void __devinit tg3_init_coal(struct tg3 *tp)
10449 {
10450         struct ethtool_coalesce *ec = &tp->coal;
10451
10452         memset(ec, 0, sizeof(*ec));
10453         ec->cmd = ETHTOOL_GCOALESCE;
10454         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10455         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10456         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10457         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10458         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10459         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10460         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10461         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10462         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10463
10464         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10465                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10466                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10467                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10468                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10469                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10470         }
10471
10472         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10473                 ec->rx_coalesce_usecs_irq = 0;
10474                 ec->tx_coalesce_usecs_irq = 0;
10475                 ec->stats_block_coalesce_usecs = 0;
10476         }
10477 }
10478
10479 static int __devinit tg3_init_one(struct pci_dev *pdev,
10480                                   const struct pci_device_id *ent)
10481 {
10482         static int tg3_version_printed = 0;
10483         unsigned long tg3reg_base, tg3reg_len;
10484         struct net_device *dev;
10485         struct tg3 *tp;
10486         int i, err, pci_using_dac, pm_cap;
10487         char str[40];
10488
10489         if (tg3_version_printed++ == 0)
10490                 printk(KERN_INFO "%s", version);
10491
10492         err = pci_enable_device(pdev);
10493         if (err) {
10494                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10495                        "aborting.\n");
10496                 return err;
10497         }
10498
10499         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10500                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10501                        "base address, aborting.\n");
10502                 err = -ENODEV;
10503                 goto err_out_disable_pdev;
10504         }
10505
10506         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10507         if (err) {
10508                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10509                        "aborting.\n");
10510                 goto err_out_disable_pdev;
10511         }
10512
10513         pci_set_master(pdev);
10514
10515         /* Find power-management capability. */
10516         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10517         if (pm_cap == 0) {
10518                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
10519                        "aborting.\n");
10520                 err = -EIO;
10521                 goto err_out_free_res;
10522         }
10523
10524         /* Configure DMA attributes. */
10525         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10526         if (!err) {
10527                 pci_using_dac = 1;
10528                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10529                 if (err < 0) {
10530                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10531                                "for consistent allocations\n");
10532                         goto err_out_free_res;
10533                 }
10534         } else {
10535                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10536                 if (err) {
10537                         printk(KERN_ERR PFX "No usable DMA configuration, "
10538                                "aborting.\n");
10539                         goto err_out_free_res;
10540                 }
10541                 pci_using_dac = 0;
10542         }
10543
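        /* BAR 0 is the memory-mapped register window; remember where it
         * lives so it can be ioremap'd once the netdev has been allocated.
         */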
10544         tg3reg_base = pci_resource_start(pdev, 0);
10545         tg3reg_len = pci_resource_len(pdev, 0);
10546
10547         dev = alloc_etherdev(sizeof(*tp));
10548         if (!dev) {
10549                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10550                 err = -ENOMEM;
10551                 goto err_out_free_res;
10552         }
10553
10554         SET_MODULE_OWNER(dev);
10555         SET_NETDEV_DEV(dev, &pdev->dev);
10556
10557         if (pci_using_dac)
10558                 dev->features |= NETIF_F_HIGHDMA;
10559         dev->features |= NETIF_F_LLTX;
10560 #if TG3_VLAN_TAG_USED
10561         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10562         dev->vlan_rx_register = tg3_vlan_rx_register;
10563         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10564 #endif
10565
10566         tp = netdev_priv(dev);
10567         tp->pdev = pdev;
10568         tp->dev = dev;
10569         tp->pm_cap = pm_cap;
10570         tp->mac_mode = TG3_DEF_MAC_MODE;
10571         tp->rx_mode = TG3_DEF_RX_MODE;
10572         tp->tx_mode = TG3_DEF_TX_MODE;
10573         tp->mi_mode = MAC_MI_MODE_BASE;
10574         if (tg3_debug > 0)
10575                 tp->msg_enable = tg3_debug;
10576         else
10577                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10578
10579         /* The word/byte swap controls set here govern register access byte
10580          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
10581          * setting below.
10582          */
10583         tp->misc_host_ctrl =
10584                 MISC_HOST_CTRL_MASK_PCI_INT |
10585                 MISC_HOST_CTRL_WORD_SWAP |
10586                 MISC_HOST_CTRL_INDIR_ACCESS |
10587                 MISC_HOST_CTRL_PCISTATE_RW;
10588
10589         /* The NONFRM (non-frame) byte/word swap controls take effect
10590          * on descriptor entries, i.e. anything which isn't packet data.
10591          *
10592          * The StrongARM chips on the board (one for tx, one for rx)
10593          * run in big-endian mode.
10594          */
10595         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10596                         GRC_MODE_WSWAP_NONFRM_DATA);
10597 #ifdef __BIG_ENDIAN
10598         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10599 #endif
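        /* Per-device locks: tp->lock and tp->tx_lock guard general chip state
         * and the transmit path respectively, while tp->indirect_lock
         * serializes indirect register accesses done through PCI config space.
         */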
10600         spin_lock_init(&tp->lock);
10601         spin_lock_init(&tp->tx_lock);
10602         spin_lock_init(&tp->indirect_lock);
10603         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10604
10605         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10606         if (!tp->regs) {
10607                 printk(KERN_ERR PFX "Cannot map device registers, "
10608                        "aborting.\n");
10609                 err = -ENOMEM;
10610                 goto err_out_free_dev;
10611         }
10612
10613         tg3_init_link_config(tp);
10614
10615         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10616         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10617         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10618
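        /* Wire up the net_device method table and the NAPI/watchdog parameters. */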
10619         dev->open = tg3_open;
10620         dev->stop = tg3_close;
10621         dev->get_stats = tg3_get_stats;
10622         dev->set_multicast_list = tg3_set_rx_mode;
10623         dev->set_mac_address = tg3_set_mac_addr;
10624         dev->do_ioctl = tg3_ioctl;
10625         dev->tx_timeout = tg3_tx_timeout;
10626         dev->poll = tg3_poll;
10627         dev->ethtool_ops = &tg3_ethtool_ops;
10628         dev->weight = 64;
10629         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10630         dev->change_mtu = tg3_change_mtu;
10631         dev->irq = pdev->irq;
10632 #ifdef CONFIG_NET_POLL_CONTROLLER
10633         dev->poll_controller = tg3_poll_controller;
10634 #endif
10635
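        /* tg3_get_invariants() works out the chip revision, bus type and the
         * assorted capability/workaround flags; most decisions below key off
         * the tg3_flags bits it sets.
         */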
10636         err = tg3_get_invariants(tp);
10637         if (err) {
10638                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10639                        "aborting.\n");
10640                 goto err_out_iounmap;
10641         }
10642
10643         tg3_init_bufmgr_config(tp);
10644
10645 #if TG3_TSO_SUPPORT != 0
10646         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10647                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10648         }
10649         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10650             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10651             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10652             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10653                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10654         } else {
10655                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10656         }
10657
10658         /* TSO is off by default, user can enable using ethtool.  */
10659 #if 0
10660         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10661                 dev->features |= NETIF_F_TSO;
10662 #endif
10663
10664 #endif
10665
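        /* A 5705 A1 without TSO enabled, sitting on a bus that does not report
         * high speed, apparently cannot cope with a deep RX ring: cap the
         * default at 63 descriptors and note the limit in tg3_flags2.
         */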
10666         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10667             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10668             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10669                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10670                 tp->rx_pending = 63;
10671         }
10672
10673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10674                 tp->pdev_peer = tg3_find_5704_peer(tp);
10675
10676         err = tg3_get_device_address(tp);
10677         if (err) {
10678                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10679                        "aborting.\n");
10680                 goto err_out_iounmap;
10681         }
10682
10683         /*
10684          * Reset the chip in case the UNDI or EFI driver did not shut DMA
10685          * down; otherwise the DMA self test will enable WDMAC and we'll
10686          * see (spurious) pending DMA on the PCI bus at that point.
10687          */
10688         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10689             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10690                 pci_save_state(tp->pdev);
10691                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10692                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10693         }
10694
10695         err = tg3_test_dma(tp);
10696         if (err) {
10697                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10698                 goto err_out_iounmap;
10699         }
10700
10701         /* Tigon3 can offload checksums for IPv4 only... and some chips
10702          * have buggy checksum engines.
10703          */
10704         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10705                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10706                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10707         } else
10708                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10709
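        /* The 5788 apparently cannot do 64-bit/highmem DMA, so drop
         * NETIF_F_HIGHDMA again if it was set above.
         */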
10710         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10711                 dev->features &= ~NETIF_F_HIGHDMA;
10712
10713         /* Flow control autonegotiation is the default behavior. */
10714         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10715
10716         tg3_init_coal(tp);
10717
10718         /* Now that we have fully set up the chip, save away a snapshot
10719          * of the PCI config space.  We need to restore it after
10720          * GRC_MISC_CFG core clock resets and after some resume events.
10721          */
10722         pci_save_state(tp->pdev);
10723
10724         err = register_netdev(dev);
10725         if (err) {
10726                 printk(KERN_ERR PFX "Cannot register net device, "
10727                        "aborting.\n");
10728                 goto err_out_iounmap;
10729         }
10730
10731         pci_set_drvdata(pdev, dev);
10732
10733         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10734                dev->name,
10735                tp->board_part_number,
10736                tp->pci_chip_rev_id,
10737                tg3_phy_string(tp),
10738                tg3_bus_string(tp, str),
10739                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10740
10741         for (i = 0; i < 6; i++)
10742                 printk("%2.2x%c", dev->dev_addr[i],
10743                        i == 5 ? '\n' : ':');
10744
10745         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10746                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10747                "TSOcap[%d]\n",
10748                dev->name,
10749                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10750                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10751                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10752                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10753                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10754                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10755                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10756         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10757                dev->name, tp->dma_rwctrl);
10758
10759         return 0;
10760
10761 err_out_iounmap:
10762         if (tp->regs) {
10763                 iounmap(tp->regs);
10764                 tp->regs = NULL;
10765         }
10766
10767 err_out_free_dev:
10768         free_netdev(dev);
10769
10770 err_out_free_res:
10771         pci_release_regions(pdev);
10772
10773 err_out_disable_pdev:
10774         pci_disable_device(pdev);
10775         pci_set_drvdata(pdev, NULL);
10776         return err;
10777 }
10778
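/* Undo everything tg3_init_one() did, in roughly reverse order. */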
10779 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10780 {
10781         struct net_device *dev = pci_get_drvdata(pdev);
10782
10783         if (dev) {
10784                 struct tg3 *tp = netdev_priv(dev);
10785
10786                 unregister_netdev(dev);
10787                 if (tp->regs) {
10788                         iounmap(tp->regs);
10789                         tp->regs = NULL;
10790                 }
10791                 free_netdev(dev);
10792                 pci_release_regions(pdev);
10793                 pci_disable_device(pdev);
10794                 pci_set_drvdata(pdev, NULL);
10795         }
10796 }
10797
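/* Quiesce the interface for a system suspend: stop the RX/TX paths and the
 * periodic timer, mask interrupts, detach and halt the chip, then enter the
 * PCI power state chosen for this suspend message.  If the power transition
 * fails, the device is brought back up so it stays usable.
 */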
10798 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10799 {
10800         struct net_device *dev = pci_get_drvdata(pdev);
10801         struct tg3 *tp = netdev_priv(dev);
10802         int err;
10803
10804         if (!netif_running(dev))
10805                 return 0;
10806
10807         tg3_netif_stop(tp);
10808
10809         del_timer_sync(&tp->timer);
10810
10811         tg3_full_lock(tp, 1);
10812         tg3_disable_ints(tp);
10813         tg3_full_unlock(tp);
10814
10815         netif_device_detach(dev);
10816
10817         tg3_full_lock(tp, 0);
10818         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10819         tg3_full_unlock(tp);
10820
10821         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10822         if (err) {
10823                 tg3_full_lock(tp, 0);
10824
10825                 tg3_init_hw(tp);
10826
10827                 tp->timer.expires = jiffies + tp->timer_offset;
10828                 add_timer(&tp->timer);
10829
10830                 netif_device_attach(dev);
10831                 tg3_netif_start(tp);
10832
10833                 tg3_full_unlock(tp);
10834         }
10835
10836         return err;
10837 }
10838
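/* Mirror image of tg3_suspend(): restore PCI config space, return the chip
 * to full power, reinitialize the hardware and restart the timer and the
 * RX/TX paths.
 */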
10839 static int tg3_resume(struct pci_dev *pdev)
10840 {
10841         struct net_device *dev = pci_get_drvdata(pdev);
10842         struct tg3 *tp = netdev_priv(dev);
10843         int err;
10844
10845         if (!netif_running(dev))
10846                 return 0;
10847
10848         pci_restore_state(tp->pdev);
10849
10850         err = tg3_set_power_state(tp, 0);
10851         if (err)
10852                 return err;
10853
10854         netif_device_attach(dev);
10855
10856         tg3_full_lock(tp, 0);
10857
10858         tg3_init_hw(tp);
10859
10860         tp->timer.expires = jiffies + tp->timer_offset;
10861         add_timer(&tp->timer);
10862
10863         tg3_netif_start(tp);
10864
10865         tg3_full_unlock(tp);
10866
10867         return 0;
10868 }
10869
10870 static struct pci_driver tg3_driver = {
10871         .name           = DRV_MODULE_NAME,
10872         .id_table       = tg3_pci_tbl,
10873         .probe          = tg3_init_one,
10874         .remove         = __devexit_p(tg3_remove_one),
10875         .suspend        = tg3_suspend,
10876         .resume         = tg3_resume
10877 };
10878
10879 static int __init tg3_init(void)
10880 {
10881         return pci_module_init(&tg3_driver);
10882 }
10883
10884 static void __exit tg3_cleanup(void)
10885 {
10886         pci_unregister_driver(&tg3_driver);
10887 }
10888
10889 module_init(tg3_init);
10890 module_exit(tg3_cleanup);