1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40
41 #include <net/checksum.h>
42
43 #include <asm/system.h>
44 #include <asm/io.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47
48 #ifdef CONFIG_SPARC64
49 #include <asm/idprom.h>
50 #include <asm/oplib.h>
51 #include <asm/pbm.h>
52 #endif
53
54 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
55 #define TG3_VLAN_TAG_USED 1
56 #else
57 #define TG3_VLAN_TAG_USED 0
58 #endif
59
60 #ifdef NETIF_F_TSO
61 #define TG3_TSO_SUPPORT 1
62 #else
63 #define TG3_TSO_SUPPORT 0
64 #endif
65
66 #include "tg3.h"
67
68 #define DRV_MODULE_NAME         "tg3"
69 #define PFX DRV_MODULE_NAME     ": "
70 #define DRV_MODULE_VERSION      "3.43"
71 #define DRV_MODULE_RELDATE      "Oct 24, 2005"
72
73 #define TG3_DEF_MAC_MODE        0
74 #define TG3_DEF_RX_MODE         0
75 #define TG3_DEF_TX_MODE         0
76 #define TG3_DEF_MSG_ENABLE        \
77         (NETIF_MSG_DRV          | \
78          NETIF_MSG_PROBE        | \
79          NETIF_MSG_LINK         | \
80          NETIF_MSG_TIMER        | \
81          NETIF_MSG_IFDOWN       | \
82          NETIF_MSG_IFUP         | \
83          NETIF_MSG_RX_ERR       | \
84          NETIF_MSG_TX_ERR)
85
86 /* Length of time before we decide the hardware is hung
87  * and dev->tx_timeout() should be called to fix the problem.
88  */
89 #define TG3_TX_TIMEOUT                  (5 * HZ)
90
91 /* hardware minimum and maximum for a single frame's data payload */
92 #define TG3_MIN_MTU                     60
93 #define TG3_MAX_MTU(tp) \
94         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
95
96 /* These numbers seem to be hard coded in the NIC firmware somehow.
97  * You can't change the ring sizes, but you can change where you place
98  * them in the NIC onboard memory.
99  */
100 #define TG3_RX_RING_SIZE                512
101 #define TG3_DEF_RX_RING_PENDING         200
102 #define TG3_RX_JUMBO_RING_SIZE          256
103 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
104
105 /* Do not place this n-ring-entries value into the tp struct itself;
106  * we really want to expose these constants to GCC so that modulo and
107  * related operations are done with shifts and masks instead of with
108  * hw multiply/modulo instructions.  Another solution would be to
109  * replace things like '% foo' with '& (foo - 1)'.
110  */
111 #define TG3_RX_RCB_RING_SIZE(tp)        \
112         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
113
114 #define TG3_TX_RING_SIZE                512
115 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
116
117 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
118                                  TG3_RX_RING_SIZE)
119 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_JUMBO_RING_SIZE)
121 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
122                                    TG3_RX_RCB_RING_SIZE(tp))
123 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
124                                  TG3_TX_RING_SIZE)
125 #define TX_BUFFS_AVAIL(TP)                                              \
126         ((TP)->tx_pending -                                             \
127          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
128 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
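/* Example of the '& (foo - 1)' trick described above: with
 * TG3_TX_RING_SIZE == 512 (a power of two), NEXT_TX(510) == 511 and
 * NEXT_TX(511) == 0, i.e. the AND with 0x1ff wraps the index exactly
 * like '% 512' but without a hardware divide.
 */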
129
130 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
131 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
132
133 /* minimum number of free TX descriptors required to wake up TX process */
134 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
135
136 /* number of ETHTOOL_GSTATS u64's */
137 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138
139 #define TG3_NUM_TEST            6
140
141 static char version[] __devinitdata =
142         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
143
144 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
145 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
146 MODULE_LICENSE("GPL");
147 MODULE_VERSION(DRV_MODULE_VERSION);
148
149 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
150 module_param(tg3_debug, int, 0);
151 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
152
153 static struct pci_device_id tg3_pci_tbl[] = {
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { 0, }
247 };
248
249 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
250
251 static struct {
252         const char string[ETH_GSTRING_LEN];
253 } ethtool_stats_keys[TG3_NUM_STATS] = {
254         { "rx_octets" },
255         { "rx_fragments" },
256         { "rx_ucast_packets" },
257         { "rx_mcast_packets" },
258         { "rx_bcast_packets" },
259         { "rx_fcs_errors" },
260         { "rx_align_errors" },
261         { "rx_xon_pause_rcvd" },
262         { "rx_xoff_pause_rcvd" },
263         { "rx_mac_ctrl_rcvd" },
264         { "rx_xoff_entered" },
265         { "rx_frame_too_long_errors" },
266         { "rx_jabbers" },
267         { "rx_undersize_packets" },
268         { "rx_in_length_errors" },
269         { "rx_out_length_errors" },
270         { "rx_64_or_less_octet_packets" },
271         { "rx_65_to_127_octet_packets" },
272         { "rx_128_to_255_octet_packets" },
273         { "rx_256_to_511_octet_packets" },
274         { "rx_512_to_1023_octet_packets" },
275         { "rx_1024_to_1522_octet_packets" },
276         { "rx_1523_to_2047_octet_packets" },
277         { "rx_2048_to_4095_octet_packets" },
278         { "rx_4096_to_8191_octet_packets" },
279         { "rx_8192_to_9022_octet_packets" },
280
281         { "tx_octets" },
282         { "tx_collisions" },
283
284         { "tx_xon_sent" },
285         { "tx_xoff_sent" },
286         { "tx_flow_control" },
287         { "tx_mac_errors" },
288         { "tx_single_collisions" },
289         { "tx_mult_collisions" },
290         { "tx_deferred" },
291         { "tx_excessive_collisions" },
292         { "tx_late_collisions" },
293         { "tx_collide_2times" },
294         { "tx_collide_3times" },
295         { "tx_collide_4times" },
296         { "tx_collide_5times" },
297         { "tx_collide_6times" },
298         { "tx_collide_7times" },
299         { "tx_collide_8times" },
300         { "tx_collide_9times" },
301         { "tx_collide_10times" },
302         { "tx_collide_11times" },
303         { "tx_collide_12times" },
304         { "tx_collide_13times" },
305         { "tx_collide_14times" },
306         { "tx_collide_15times" },
307         { "tx_ucast_packets" },
308         { "tx_mcast_packets" },
309         { "tx_bcast_packets" },
310         { "tx_carrier_sense_errors" },
311         { "tx_discards" },
312         { "tx_errors" },
313
314         { "dma_writeq_full" },
315         { "dma_write_prioq_full" },
316         { "rxbds_empty" },
317         { "rx_discards" },
318         { "rx_errors" },
319         { "rx_threshold_hit" },
320
321         { "dma_readq_full" },
322         { "dma_read_prioq_full" },
323         { "tx_comp_queue_full" },
324
325         { "ring_set_send_prod_index" },
326         { "ring_status_update" },
327         { "nic_irqs" },
328         { "nic_avoided_irqs" },
329         { "nic_tx_threshold_hit" }
330 };
331
332 static struct {
333         const char string[ETH_GSTRING_LEN];
334 } ethtool_test_keys[TG3_NUM_TEST] = {
335         { "nvram test     (online) " },
336         { "link test      (online) " },
337         { "register test  (offline)" },
338         { "memory test    (offline)" },
339         { "loopback test  (offline)" },
340         { "interrupt test (offline)" },
341 };
342
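/* Indirect register write: instead of a memory-mapped access, the target
 * register offset is written to the TG3PCI_REG_BASE_ADDR window in PCI
 * config space and the value to TG3PCI_REG_DATA.  indirect_lock keeps the
 * two config cycles atomic with respect to other indirect accesses.
 */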
343 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
344 {
345         unsigned long flags;
346
347         spin_lock_irqsave(&tp->indirect_lock, flags);
348         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
349         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
350         spin_unlock_irqrestore(&tp->indirect_lock, flags);
351 }
352
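/* Write a register and read it back so the posted PCI write is flushed
 * before the caller continues.
 */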
353 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
354 {
355         writel(val, tp->regs + off);
356         readl(tp->regs + off);
357 }
358
359 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
360 {
361         unsigned long flags;
362         u32 val;
363
364         spin_lock_irqsave(&tp->indirect_lock, flags);
365         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
366         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
367         spin_unlock_irqrestore(&tp->indirect_lock, flags);
368         return val;
369 }
370
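/* Indirect mailbox write.  The receive return ring and standard ring
 * producer mailboxes are written through dedicated PCI config space
 * locations; all other mailboxes go through the register window at
 * offset +0x5600.
 */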
371 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
372 {
373         unsigned long flags;
374
375         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
376                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
377                                        TG3_64BIT_REG_LOW, val);
378                 return;
379         }
380         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
381                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
382                                        TG3_64BIT_REG_LOW, val);
383                 return;
384         }
385
386         spin_lock_irqsave(&tp->indirect_lock, flags);
387         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
388         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390
391         /* In indirect mode when disabling interrupts, we also need
392          * to clear the interrupt bit in the GRC local ctrl register.
393          */
394         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
395             (val == 0x1)) {
396                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
397                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
398         }
399 }
400
401 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
402 {
403         unsigned long flags;
404         u32 val;
405
406         spin_lock_irqsave(&tp->indirect_lock, flags);
407         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
408         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
409         spin_unlock_irqrestore(&tp->indirect_lock, flags);
410         return val;
411 }
412
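/* Write through the installed write32 hook and, unless one of the
 * register-access bug workarounds forbids it, read the register back
 * to flush the write.
 */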
413 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
414 {
415         tp->write32(tp, off, val);
416         if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
417             !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
418             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
419                 tp->read32(tp, off);    /* flush */
420 }
421
422 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
423 {
424         tp->write32_mbox(tp, off, val);
425         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
426             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
427                 tp->read32_mbox(tp, off);
428 }
429
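/* TX mailbox write with the chip bug workarounds: write twice when
 * TXD_MBOX_HWBUG is set, and read back when mailbox writes may be
 * reordered.
 */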
430 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
431 {
432         void __iomem *mbox = tp->regs + off;
433         writel(val, mbox);
434         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
435                 writel(val, mbox);
436         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
437                 readl(mbox);
438 }
439
440 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
441 {
442         writel(val, tp->regs + off);
443 }
444
445 static u32 tg3_read32(struct tg3 *tp, u32 off)
446 {
447         return readl(tp->regs + off);
448 }
449
450 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
451 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
452 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
453 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
454 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
455
456 #define tw32(reg,val)           tp->write32(tp, reg, val)
457 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
458 #define tr32(reg)               tp->read32(tp, reg)
459
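/* Write a word of NIC SRAM through the PCI memory window (base address
 * plus data register in config space).  The window base is always left
 * at zero afterwards.
 */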
460 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
461 {
462         unsigned long flags;
463
464         spin_lock_irqsave(&tp->indirect_lock, flags);
465         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
466         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
467
468         /* Always leave this as zero. */
469         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
470         spin_unlock_irqrestore(&tp->indirect_lock, flags);
471 }
472
473 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
474 {
475         /* If no workaround is needed, write to mem space directly */
476         if (tp->write32 != tg3_write_indirect_reg32)
477                 tw32(NIC_SRAM_WIN_BASE + off, val);
478         else
479                 tg3_write_mem(tp, off, val);
480 }
481
482 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
483 {
484         unsigned long flags;
485
486         spin_lock_irqsave(&tp->indirect_lock, flags);
487         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
488         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
489
490         /* Always leave this as zero. */
491         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
492         spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 }
494
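/* Mask the PCI interrupt in misc host control and set bit 0 of
 * interrupt mailbox 0, which stops further interrupt delivery until
 * tg3_enable_ints() is called.
 */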
495 static void tg3_disable_ints(struct tg3 *tp)
496 {
497         tw32(TG3PCI_MISC_HOST_CTRL,
498              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
499         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
500 }
501
502 static inline void tg3_cond_int(struct tg3 *tp)
503 {
504         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
505             (tp->hw_status->status & SD_STATUS_UPDATED))
506                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
507 }
508
509 static void tg3_enable_ints(struct tg3 *tp)
510 {
511         tp->irq_sync = 0;
512         wmb();
513
514         tw32(TG3PCI_MISC_HOST_CTRL,
515              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
516         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
517                        (tp->last_tag << 24));
518         tg3_cond_int(tp);
519 }
520
521 static inline unsigned int tg3_has_work(struct tg3 *tp)
522 {
523         struct tg3_hw_status *sblk = tp->hw_status;
524         unsigned int work_exists = 0;
525
526         /* check for phy events */
527         if (!(tp->tg3_flags &
528               (TG3_FLAG_USE_LINKCHG_REG |
529                TG3_FLAG_POLL_SERDES))) {
530                 if (sblk->status & SD_STATUS_LINK_CHG)
531                         work_exists = 1;
532         }
533         /* check for RX/TX work to do */
534         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
535             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
536                 work_exists = 1;
537
538         return work_exists;
539 }
540
541 /* tg3_restart_ints
542  *  Similar to tg3_enable_ints, but it accurately determines whether there
543  *  is new work pending and can return without flushing the PIO write
544  *  which re-enables interrupts.
545  */
546 static void tg3_restart_ints(struct tg3 *tp)
547 {
548         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
549                      tp->last_tag << 24);
550         mmiowb();
551
552         /* When doing tagged status, this work check is unnecessary.
553          * The last_tag we write above tells the chip which piece of
554          * work we've completed.
555          */
556         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
557             tg3_has_work(tp))
558                 tw32(HOSTCC_MODE, tp->coalesce_mode |
559                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
560 }
561
562 static inline void tg3_netif_stop(struct tg3 *tp)
563 {
564         tp->dev->trans_start = jiffies; /* prevent tx timeout */
565         netif_poll_disable(tp->dev);
566         netif_tx_disable(tp->dev);
567 }
568
569 static inline void tg3_netif_start(struct tg3 *tp)
570 {
571         netif_wake_queue(tp->dev);
572         /* NOTE: unconditional netif_wake_queue is only appropriate
573          * so long as all callers are assured to have free tx slots
574          * (such as after tg3_init_hw)
575          */
576         netif_poll_enable(tp->dev);
577         tp->hw_status->status |= SD_STATUS_UPDATED;
578         tg3_enable_ints(tp);
579 }
580
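/* Switch the chip's core clock source, preserving the CLKRUN control
 * bits.  The 5705+ and older parts use different CLOCK_CTRL sequences,
 * and 5780-class chips are left alone entirely.
 */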
581 static void tg3_switch_clocks(struct tg3 *tp)
582 {
583         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
584         u32 orig_clock_ctrl;
585
586         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
587                 return;
588
589         orig_clock_ctrl = clock_ctrl;
590         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
591                        CLOCK_CTRL_CLKRUN_OENABLE |
592                        0x1f);
593         tp->pci_clock_ctrl = clock_ctrl;
594
595         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
596                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
597                         tw32_f(TG3PCI_CLOCK_CTRL,
598                                clock_ctrl | CLOCK_CTRL_625_CORE);
599                         udelay(40);
600                 }
601         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
602                 tw32_f(TG3PCI_CLOCK_CTRL,
603                      clock_ctrl |
604                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
605                 udelay(40);
606                 tw32_f(TG3PCI_CLOCK_CTRL,
607                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
608                 udelay(40);
609         }
610         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
611         udelay(40);
612 }
613
614 #define PHY_BUSY_LOOPS  5000
615
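/* Read an MII PHY register by building a frame in MAC_MI_COM and busy
 * polling for MI_COM_BUSY to clear (up to PHY_BUSY_LOOPS iterations).
 * PHY auto-polling is temporarily turned off around the access.
 */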
616 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
617 {
618         u32 frame_val;
619         unsigned int loops;
620         int ret;
621
622         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
623                 tw32_f(MAC_MI_MODE,
624                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
625                 udelay(80);
626         }
627
628         *val = 0x0;
629
630         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
631                       MI_COM_PHY_ADDR_MASK);
632         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
633                       MI_COM_REG_ADDR_MASK);
634         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
635         
636         tw32_f(MAC_MI_COM, frame_val);
637
638         loops = PHY_BUSY_LOOPS;
639         while (loops != 0) {
640                 udelay(10);
641                 frame_val = tr32(MAC_MI_COM);
642
643                 if ((frame_val & MI_COM_BUSY) == 0) {
644                         udelay(5);
645                         frame_val = tr32(MAC_MI_COM);
646                         break;
647                 }
648                 loops -= 1;
649         }
650
651         ret = -EBUSY;
652         if (loops != 0) {
653                 *val = frame_val & MI_COM_DATA_MASK;
654                 ret = 0;
655         }
656
657         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
658                 tw32_f(MAC_MI_MODE, tp->mi_mode);
659                 udelay(80);
660         }
661
662         return ret;
663 }
664
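/* Write an MII PHY register; same MAC_MI_COM handshake as tg3_readphy
 * but with a write command and the data folded into the frame.
 */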
665 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
666 {
667         u32 frame_val;
668         unsigned int loops;
669         int ret;
670
671         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
672                 tw32_f(MAC_MI_MODE,
673                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
674                 udelay(80);
675         }
676
677         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
678                       MI_COM_PHY_ADDR_MASK);
679         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
680                       MI_COM_REG_ADDR_MASK);
681         frame_val |= (val & MI_COM_DATA_MASK);
682         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
683         
684         tw32_f(MAC_MI_COM, frame_val);
685
686         loops = PHY_BUSY_LOOPS;
687         while (loops != 0) {
688                 udelay(10);
689                 frame_val = tr32(MAC_MI_COM);
690                 if ((frame_val & MI_COM_BUSY) == 0) {
691                         udelay(5);
692                         frame_val = tr32(MAC_MI_COM);
693                         break;
694                 }
695                 loops -= 1;
696         }
697
698         ret = -EBUSY;
699         if (loops != 0)
700                 ret = 0;
701
702         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
703                 tw32_f(MAC_MI_MODE, tp->mi_mode);
704                 udelay(80);
705         }
706
707         return ret;
708 }
709
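/* Enable the PHY's ethernet@wirespeed feature through the auxiliary
 * control register unless the chip is flagged as not supporting it.
 */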
710 static void tg3_phy_set_wirespeed(struct tg3 *tp)
711 {
712         u32 val;
713
714         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
715                 return;
716
717         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
718             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
719                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
720                              (val | (1 << 15) | (1 << 4)));
721 }
722
723 static int tg3_bmcr_reset(struct tg3 *tp)
724 {
725         u32 phy_control;
726         int limit, err;
727
728         /* OK, reset it, and poll the BMCR_RESET bit until it
729          * clears or we time out.
730          */
731         phy_control = BMCR_RESET;
732         err = tg3_writephy(tp, MII_BMCR, phy_control);
733         if (err != 0)
734                 return -EBUSY;
735
736         limit = 5000;
737         while (limit--) {
738                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
739                 if (err != 0)
740                         return -EBUSY;
741
742                 if ((phy_control & BMCR_RESET) == 0) {
743                         udelay(40);
744                         break;
745                 }
746                 udelay(10);
747         }
748         if (limit <= 0)
749                 return -EBUSY;
750
751         return 0;
752 }
753
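/* Poll PHY register 0x16 until the DSP macro-done bit (0x1000) clears
 * or the loop limit is exhausted.
 */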
754 static int tg3_wait_macro_done(struct tg3 *tp)
755 {
756         int limit = 100;
757
758         while (limit--) {
759                 u32 tmp32;
760
761                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
762                         if ((tmp32 & 0x1000) == 0)
763                                 break;
764                 }
765         }
766         if (limit <= 0)
767                 return -EBUSY;
768
769         return 0;
770 }
771
772 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
773 {
774         static const u32 test_pat[4][6] = {
775         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
776         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
777         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
778         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
779         };
780         int chan;
781
782         for (chan = 0; chan < 4; chan++) {
783                 int i;
784
785                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
786                              (chan * 0x2000) | 0x0200);
787                 tg3_writephy(tp, 0x16, 0x0002);
788
789                 for (i = 0; i < 6; i++)
790                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
791                                      test_pat[chan][i]);
792
793                 tg3_writephy(tp, 0x16, 0x0202);
794                 if (tg3_wait_macro_done(tp)) {
795                         *resetp = 1;
796                         return -EBUSY;
797                 }
798
799                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
800                              (chan * 0x2000) | 0x0200);
801                 tg3_writephy(tp, 0x16, 0x0082);
802                 if (tg3_wait_macro_done(tp)) {
803                         *resetp = 1;
804                         return -EBUSY;
805                 }
806
807                 tg3_writephy(tp, 0x16, 0x0802);
808                 if (tg3_wait_macro_done(tp)) {
809                         *resetp = 1;
810                         return -EBUSY;
811                 }
812
813                 for (i = 0; i < 6; i += 2) {
814                         u32 low, high;
815
816                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
817                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
818                             tg3_wait_macro_done(tp)) {
819                                 *resetp = 1;
820                                 return -EBUSY;
821                         }
822                         low &= 0x7fff;
823                         high &= 0x000f;
824                         if (low != test_pat[chan][i] ||
825                             high != test_pat[chan][i+1]) {
826                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
827                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
828                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
829
830                                 return -EBUSY;
831                         }
832                 }
833         }
834
835         return 0;
836 }
837
838 static int tg3_phy_reset_chanpat(struct tg3 *tp)
839 {
840         int chan;
841
842         for (chan = 0; chan < 4; chan++) {
843                 int i;
844
845                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
846                              (chan * 0x2000) | 0x0200);
847                 tg3_writephy(tp, 0x16, 0x0002);
848                 for (i = 0; i < 6; i++)
849                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
850                 tg3_writephy(tp, 0x16, 0x0202);
851                 if (tg3_wait_macro_done(tp))
852                         return -EBUSY;
853         }
854
855         return 0;
856 }
857
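/* PHY reset sequence for 5703/5704/5705: reset the PHY, force 1000
 * full-duplex master mode, run the DSP test pattern check (retrying
 * with a fresh reset on failure), then restore the original settings
 * and re-enable the transmitter.
 */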
858 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
859 {
860         u32 reg32, phy9_orig;
861         int retries, do_phy_reset, err;
862
863         retries = 10;
864         do_phy_reset = 1;
865         do {
866                 if (do_phy_reset) {
867                         err = tg3_bmcr_reset(tp);
868                         if (err)
869                                 return err;
870                         do_phy_reset = 0;
871                 }
872
873                 /* Disable transmitter and interrupt.  */
874                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
875                         continue;
876
877                 reg32 |= 0x3000;
878                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
879
880                 /* Set full-duplex, 1000 mbps.  */
881                 tg3_writephy(tp, MII_BMCR,
882                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
883
884                 /* Set to master mode.  */
885                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
886                         continue;
887
888                 tg3_writephy(tp, MII_TG3_CTRL,
889                              (MII_TG3_CTRL_AS_MASTER |
890                               MII_TG3_CTRL_ENABLE_AS_MASTER));
891
892                 /* Enable SM_DSP_CLOCK and 6dB.  */
893                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
894
895                 /* Block the PHY control access.  */
896                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
897                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
898
899                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
900                 if (!err)
901                         break;
902         } while (--retries);
903
904         err = tg3_phy_reset_chanpat(tp);
905         if (err)
906                 return err;
907
908         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
909         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
910
911         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
912         tg3_writephy(tp, 0x16, 0x0000);
913
914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
916                 /* Set Extended packet length bit for jumbo frames */
917                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
918         }
919         else {
920                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
921         }
922
923         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
924
925         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
926                 reg32 &= ~0x3000;
927                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
928         } else if (!err)
929                 err = -EBUSY;
930
931         return err;
932 }
933
934 /* Reset the tigon3 PHY and apply any chip-specific workarounds
935  * needed afterwards.
936  */
937 static int tg3_phy_reset(struct tg3 *tp)
938 {
939         u32 phy_status;
940         int err;
941
942         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
943         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
944         if (err != 0)
945                 return -EBUSY;
946
947         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
950                 err = tg3_phy_reset_5703_4_5(tp);
951                 if (err)
952                         return err;
953                 goto out;
954         }
955
956         err = tg3_bmcr_reset(tp);
957         if (err)
958                 return err;
959
960 out:
961         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
963                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
964                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
965                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
966                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
967                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
968         }
969         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
970                 tg3_writephy(tp, 0x1c, 0x8d68);
971                 tg3_writephy(tp, 0x1c, 0x8d68);
972         }
973         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
974                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
975                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
976                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
977                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
978                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
979                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
980                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
981                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
982         }
983         /* Set Extended packet length bit (bit 14) on all chips
984          * that support jumbo frames. */
985         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
986                 /* Cannot do read-modify-write on 5401 */
987                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
988         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
989                 u32 phy_reg;
990
991                 /* Set bit 14 with read-modify-write to preserve other bits */
992                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
993                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
994                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
995         }
996
997         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
998          * jumbo frame transmission.
999          */
1000         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1001                 u32 phy_reg;
1002
1003                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1004                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1005                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1006         }
1007
1008         tg3_phy_set_wirespeed(tp);
1009         return 0;
1010 }
1011
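/* Configure the GPIO-controlled auxiliary (Vaux) power switching.  On
 * 5704 the setting is coordinated with the peer port, and the GPIO
 * sequence depends on whether WOL is enabled and whether GPIO2 is
 * usable on this board.
 */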
1012 static void tg3_frob_aux_power(struct tg3 *tp)
1013 {
1014         struct tg3 *tp_peer = tp;
1015
1016         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1017                 return;
1018
1019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1020                 tp_peer = pci_get_drvdata(tp->pdev_peer);
1021                 if (!tp_peer)
1022                         BUG();
1023         }
1024
1025
1026         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1027             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
1028                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1029                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1030                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1031                              (GRC_LCLCTRL_GPIO_OE0 |
1032                               GRC_LCLCTRL_GPIO_OE1 |
1033                               GRC_LCLCTRL_GPIO_OE2 |
1034                               GRC_LCLCTRL_GPIO_OUTPUT0 |
1035                               GRC_LCLCTRL_GPIO_OUTPUT1));
1036                         udelay(100);
1037                 } else {
1038                         u32 no_gpio2;
1039                         u32 grc_local_ctrl;
1040
1041                         if (tp_peer != tp &&
1042                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1043                                 return;
1044
1045                         /* On 5753 and variants, GPIO2 cannot be used. */
1046                         no_gpio2 = tp->nic_sram_data_cfg &
1047                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1048
1049                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1050                                          GRC_LCLCTRL_GPIO_OE1 |
1051                                          GRC_LCLCTRL_GPIO_OE2 |
1052                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1053                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1054                         if (no_gpio2) {
1055                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1056                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1057                         }
1058                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1059                                                 grc_local_ctrl);
1060                         udelay(100);
1061
1062                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1063
1064                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1065                                                 grc_local_ctrl);
1066                         udelay(100);
1067
1068                         if (!no_gpio2) {
1069                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1070                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1071                                        grc_local_ctrl);
1072                                 udelay(100);
1073                         }
1074                 }
1075         } else {
1076                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1077                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1078                         if (tp_peer != tp &&
1079                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1080                                 return;
1081
1082                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1083                              (GRC_LCLCTRL_GPIO_OE1 |
1084                               GRC_LCLCTRL_GPIO_OUTPUT1));
1085                         udelay(100);
1086
1087                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1088                              (GRC_LCLCTRL_GPIO_OE1));
1089                         udelay(100);
1090
1091                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1092                              (GRC_LCLCTRL_GPIO_OE1 |
1093                               GRC_LCLCTRL_GPIO_OUTPUT1));
1094                         udelay(100);
1095                 }
1096         }
1097 }
1098
1099 static int tg3_setup_phy(struct tg3 *, int);
1100
1101 #define RESET_KIND_SHUTDOWN     0
1102 #define RESET_KIND_INIT         1
1103 #define RESET_KIND_SUSPEND      2
1104
1105 static void tg3_write_sig_post_reset(struct tg3 *, int);
1106 static int tg3_halt_cpu(struct tg3 *, u32);
1107
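/* Move the chip between PCI power states.  For state 0 (D0) this simply
 * clears the power-control field and switches out of Vaux; for the low
 * power states it masks interrupts, drops the link to 10/half where
 * possible, arms WOL if requested, slows or gates the core clocks and
 * finally writes the new PCI_PM_CTRL value.
 */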
1108 static int tg3_set_power_state(struct tg3 *tp, int state)
1109 {
1110         u32 misc_host_ctrl;
1111         u16 power_control, power_caps;
1112         int pm = tp->pm_cap;
1113
1114         /* Make sure register accesses (indirect or otherwise)
1115          * will function correctly.
1116          */
1117         pci_write_config_dword(tp->pdev,
1118                                TG3PCI_MISC_HOST_CTRL,
1119                                tp->misc_host_ctrl);
1120
1121         pci_read_config_word(tp->pdev,
1122                              pm + PCI_PM_CTRL,
1123                              &power_control);
1124         power_control |= PCI_PM_CTRL_PME_STATUS;
1125         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1126         switch (state) {
1127         case 0:
1128                 power_control |= 0;
1129                 pci_write_config_word(tp->pdev,
1130                                       pm + PCI_PM_CTRL,
1131                                       power_control);
1132                 udelay(100);    /* Delay after power state change */
1133
1134                 /* Switch out of Vaux if it is not a LOM */
1135                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1136                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1137                         udelay(100);
1138                 }
1139
1140                 return 0;
1141
1142         case 1:
1143                 power_control |= 1;
1144                 break;
1145
1146         case 2:
1147                 power_control |= 2;
1148                 break;
1149
1150         case 3:
1151                 power_control |= 3;
1152                 break;
1153
1154         default:
1155                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1156                        "requested.\n",
1157                        tp->dev->name, state);
1158                 return -EINVAL;
1159         };
1160
1161         power_control |= PCI_PM_CTRL_PME_ENABLE;
1162
1163         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1164         tw32(TG3PCI_MISC_HOST_CTRL,
1165              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1166
1167         if (tp->link_config.phy_is_low_power == 0) {
1168                 tp->link_config.phy_is_low_power = 1;
1169                 tp->link_config.orig_speed = tp->link_config.speed;
1170                 tp->link_config.orig_duplex = tp->link_config.duplex;
1171                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1172         }
1173
1174         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1175                 tp->link_config.speed = SPEED_10;
1176                 tp->link_config.duplex = DUPLEX_HALF;
1177                 tp->link_config.autoneg = AUTONEG_ENABLE;
1178                 tg3_setup_phy(tp, 0);
1179         }
1180
1181         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1182
1183         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1184                 u32 mac_mode;
1185
1186                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1187                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1188                         udelay(40);
1189
1190                         mac_mode = MAC_MODE_PORT_MODE_MII;
1191
1192                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1193                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1194                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1195                 } else {
1196                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1197                 }
1198
1199                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1200                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1201
1202                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1203                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1204                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1205
1206                 tw32_f(MAC_MODE, mac_mode);
1207                 udelay(100);
1208
1209                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1210                 udelay(10);
1211         }
1212
1213         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1214             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1215              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1216                 u32 base_val;
1217
1218                 base_val = tp->pci_clock_ctrl;
1219                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1220                              CLOCK_CTRL_TXCLK_DISABLE);
1221
1222                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1223                      CLOCK_CTRL_ALTCLK |
1224                      CLOCK_CTRL_PWRDOWN_PLL133);
1225                 udelay(40);
1226         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1227                 /* do nothing */
1228         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1229                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1230                 u32 newbits1, newbits2;
1231
1232                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1233                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1234                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1235                                     CLOCK_CTRL_TXCLK_DISABLE |
1236                                     CLOCK_CTRL_ALTCLK);
1237                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1238                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1239                         newbits1 = CLOCK_CTRL_625_CORE;
1240                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1241                 } else {
1242                         newbits1 = CLOCK_CTRL_ALTCLK;
1243                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1244                 }
1245
1246                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1247                 udelay(40);
1248
1249                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1250                 udelay(40);
1251
1252                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1253                         u32 newbits3;
1254
1255                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1256                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1257                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1258                                             CLOCK_CTRL_TXCLK_DISABLE |
1259                                             CLOCK_CTRL_44MHZ_CORE);
1260                         } else {
1261                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1262                         }
1263
1264                         tw32_f(TG3PCI_CLOCK_CTRL,
1265                                          tp->pci_clock_ctrl | newbits3);
1266                         udelay(40);
1267                 }
1268         }
1269
1270         tg3_frob_aux_power(tp);
1271
1272         /* Workaround for unstable PLL clock */
1273         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1274             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1275                 u32 val = tr32(0x7d00);
1276
1277                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1278                 tw32(0x7d00, val);
1279                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1280                         tg3_halt_cpu(tp, RX_CPU_BASE);
1281         }
1282
1283         /* Finally, set the new power state. */
1284         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1285         udelay(100);    /* Delay after power state change */
1286
1287         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1288
1289         return 0;
1290 }
1291
1292 static void tg3_link_report(struct tg3 *tp)
1293 {
1294         if (!netif_carrier_ok(tp->dev)) {
1295                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1296         } else {
1297                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1298                        tp->dev->name,
1299                        (tp->link_config.active_speed == SPEED_1000 ?
1300                         1000 :
1301                         (tp->link_config.active_speed == SPEED_100 ?
1302                          100 : 10)),
1303                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1304                         "full" : "half"));
1305
1306                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1307                        "%s for RX.\n",
1308                        tp->dev->name,
1309                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1310                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1311         }
1312 }
1313
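/* Resolve TX/RX pause settings from the local and link-partner autoneg
 * advertisements using the standard pause resolution rules and program
 * MAC_RX_MODE/MAC_TX_MODE accordingly.  When pause autonegotiation is
 * disabled, the previously configured flags are kept.
 */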
1314 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1315 {
1316         u32 new_tg3_flags = 0;
1317         u32 old_rx_mode = tp->rx_mode;
1318         u32 old_tx_mode = tp->tx_mode;
1319
1320         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1321
1322                 /* Convert 1000BaseX flow control bits to 1000BaseT
1323                  * bits before resolving flow control.
1324                  */
1325                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1326                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1327                                        ADVERTISE_PAUSE_ASYM);
1328                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1329
1330                         if (local_adv & ADVERTISE_1000XPAUSE)
1331                                 local_adv |= ADVERTISE_PAUSE_CAP;
1332                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1333                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1334                         if (remote_adv & LPA_1000XPAUSE)
1335                                 remote_adv |= LPA_PAUSE_CAP;
1336                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1337                                 remote_adv |= LPA_PAUSE_ASYM;
1338                 }
1339
1340                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1341                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1342                                 if (remote_adv & LPA_PAUSE_CAP)
1343                                         new_tg3_flags |=
1344                                                 (TG3_FLAG_RX_PAUSE |
1345                                                 TG3_FLAG_TX_PAUSE);
1346                                 else if (remote_adv & LPA_PAUSE_ASYM)
1347                                         new_tg3_flags |=
1348                                                 (TG3_FLAG_RX_PAUSE);
1349                         } else {
1350                                 if (remote_adv & LPA_PAUSE_CAP)
1351                                         new_tg3_flags |=
1352                                                 (TG3_FLAG_RX_PAUSE |
1353                                                 TG3_FLAG_TX_PAUSE);
1354                         }
1355                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1356                         if ((remote_adv & LPA_PAUSE_CAP) &&
1357                         (remote_adv & LPA_PAUSE_ASYM))
1358                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1359                 }
1360
1361                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1362                 tp->tg3_flags |= new_tg3_flags;
1363         } else {
1364                 new_tg3_flags = tp->tg3_flags;
1365         }
1366
1367         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1368                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1369         else
1370                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1371
1372         if (old_rx_mode != tp->rx_mode) {
1373                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1374         }
1375         
1376         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1377                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1378         else
1379                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1380
1381         if (old_tx_mode != tp->tx_mode) {
1382                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1383         }
1384 }
1385
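/* Decode the speed/duplex field of the PHY auxiliary status register
 * into SPEED_xxx and DUPLEX_xxx values; unknown encodings map to
 * SPEED_INVALID and DUPLEX_INVALID.
 */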
1386 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1387 {
1388         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1389         case MII_TG3_AUX_STAT_10HALF:
1390                 *speed = SPEED_10;
1391                 *duplex = DUPLEX_HALF;
1392                 break;
1393
1394         case MII_TG3_AUX_STAT_10FULL:
1395                 *speed = SPEED_10;
1396                 *duplex = DUPLEX_FULL;
1397                 break;
1398
1399         case MII_TG3_AUX_STAT_100HALF:
1400                 *speed = SPEED_100;
1401                 *duplex = DUPLEX_HALF;
1402                 break;
1403
1404         case MII_TG3_AUX_STAT_100FULL:
1405                 *speed = SPEED_100;
1406                 *duplex = DUPLEX_FULL;
1407                 break;
1408
1409         case MII_TG3_AUX_STAT_1000HALF:
1410                 *speed = SPEED_1000;
1411                 *duplex = DUPLEX_HALF;
1412                 break;
1413
1414         case MII_TG3_AUX_STAT_1000FULL:
1415                 *speed = SPEED_1000;
1416                 *duplex = DUPLEX_FULL;
1417                 break;
1418
1419         default:
1420                 *speed = SPEED_INVALID;
1421                 *duplex = DUPLEX_INVALID;
1422                 break;
1423         }
1424 }
1425
1426 static void tg3_phy_copper_begin(struct tg3 *tp)
1427 {
1428         u32 new_adv;
1429         int i;
1430
1431         if (tp->link_config.phy_is_low_power) {
1432                 /* Entering low power mode.  Disable gigabit and
1433                  * 100baseT advertisements.
1434                  */
1435                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1436
1437                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1438                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1439                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1440                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1441
1442                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1443         } else if (tp->link_config.speed == SPEED_INVALID) {
1444                 tp->link_config.advertising =
1445                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1446                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1447                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1448                          ADVERTISED_Autoneg | ADVERTISED_MII);
1449
1450                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1451                         tp->link_config.advertising &=
1452                                 ~(ADVERTISED_1000baseT_Half |
1453                                   ADVERTISED_1000baseT_Full);
1454
1455                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1456                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1457                         new_adv |= ADVERTISE_10HALF;
1458                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1459                         new_adv |= ADVERTISE_10FULL;
1460                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1461                         new_adv |= ADVERTISE_100HALF;
1462                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1463                         new_adv |= ADVERTISE_100FULL;
1464                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1465
1466                 if (tp->link_config.advertising &
1467                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1468                         new_adv = 0;
1469                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1470                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1471                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1472                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1473                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1474                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1475                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1476                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1477                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1478                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1479                 } else {
1480                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1481                 }
1482         } else {
1483                 /* Asking for a specific link mode. */
1484                 if (tp->link_config.speed == SPEED_1000) {
1485                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1486                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1487
1488                         if (tp->link_config.duplex == DUPLEX_FULL)
1489                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1490                         else
1491                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1492                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1493                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1494                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1495                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1496                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1497                 } else {
1498                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1499
1500                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1501                         if (tp->link_config.speed == SPEED_100) {
1502                                 if (tp->link_config.duplex == DUPLEX_FULL)
1503                                         new_adv |= ADVERTISE_100FULL;
1504                                 else
1505                                         new_adv |= ADVERTISE_100HALF;
1506                         } else {
1507                                 if (tp->link_config.duplex == DUPLEX_FULL)
1508                                         new_adv |= ADVERTISE_10FULL;
1509                                 else
1510                                         new_adv |= ADVERTISE_10HALF;
1511                         }
1512                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1513                 }
1514         }
1515
1516         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1517             tp->link_config.speed != SPEED_INVALID) {
1518                 u32 bmcr, orig_bmcr;
1519
1520                 tp->link_config.active_speed = tp->link_config.speed;
1521                 tp->link_config.active_duplex = tp->link_config.duplex;
1522
1523                 bmcr = 0;
1524                 switch (tp->link_config.speed) {
1525                 default:
1526                 case SPEED_10:
1527                         break;
1528
1529                 case SPEED_100:
1530                         bmcr |= BMCR_SPEED100;
1531                         break;
1532
1533                 case SPEED_1000:
1534                         bmcr |= TG3_BMCR_SPEED1000;
1535                         break;
1536                 }
1537
1538                 if (tp->link_config.duplex == DUPLEX_FULL)
1539                         bmcr |= BMCR_FULLDPLX;
1540
1541                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1542                     (bmcr != orig_bmcr)) {
1543                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1544                         for (i = 0; i < 1500; i++) {
1545                                 u32 tmp;
1546
1547                                 udelay(10);
1548                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1549                                     tg3_readphy(tp, MII_BMSR, &tmp))
1550                                         continue;
1551                                 if (!(tmp & BMSR_LSTATUS)) {
1552                                         udelay(40);
1553                                         break;
1554                                 }
1555                         }
1556                         tg3_writephy(tp, MII_BMCR, bmcr);
1557                         udelay(40);
1558                 }
1559         } else {
1560                 tg3_writephy(tp, MII_BMCR,
1561                              BMCR_ANENABLE | BMCR_ANRESTART);
1562         }
1563 }
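/* tg3_phy_copper_begin() chooses one of three setups: in low-power
 * mode it advertises only 10Mb (plus 100Mb when 100Mb WOL is enabled);
 * with no specific speed requested it advertises every mode the chip
 * supports and relies on autoneg; with a forced speed/duplex it
 * programs MII_ADVERTISE / MII_TG3_CTRL for just that mode.  In the
 * forced, non-autoneg case the PHY is first put into loopback and the
 * code polls (up to ~15ms) for link to drop before the final BMCR
 * value is written; otherwise autoneg is simply (re)started.
 */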
1564
1565 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1566 {
1567         int err;
1568
1569         /* Turn off tap power management. */
1570         /* Set Extended packet length bit */
1571         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1572
1573         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1574         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1575
1576         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1577         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1578
1579         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1580         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1581
1582         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1583         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1584
1585         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1586         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1587
1588         udelay(40);
1589
1590         return err;
1591 }
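/* All of the writes above use the same BCM5401 DSP indirection: the
 * target register goes into MII_TG3_DSP_ADDRESS, the value into
 * MII_TG3_DSP_RW_PORT.  An illustrative helper for that pattern is
 * sketched below; tg3_phydsp_write() is a hypothetical name and not
 * part of this driver, so the sketch is kept compiled out.
 */
#if 0
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err  = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
        return err;
}
#endif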
1592
1593 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1594 {
1595         u32 adv_reg, all_mask;
1596
1597         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1598                 return 0;
1599
1600         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1601                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1602         if ((adv_reg & all_mask) != all_mask)
1603                 return 0;
1604         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1605                 u32 tg3_ctrl;
1606
1607                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1608                         return 0;
1609
1610                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1611                             MII_TG3_CTRL_ADV_1000_FULL);
1612                 if ((tg3_ctrl & all_mask) != all_mask)
1613                         return 0;
1614         }
1615         return 1;
1616 }
1617
1618 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1619 {
1620         int current_link_up;
1621         u32 bmsr, dummy;
1622         u16 current_speed;
1623         u8 current_duplex;
1624         int i, err;
1625
1626         tw32(MAC_EVENT, 0);
1627
1628         tw32_f(MAC_STATUS,
1629              (MAC_STATUS_SYNC_CHANGED |
1630               MAC_STATUS_CFG_CHANGED |
1631               MAC_STATUS_MI_COMPLETION |
1632               MAC_STATUS_LNKSTATE_CHANGED));
1633         udelay(40);
1634
1635         tp->mi_mode = MAC_MI_MODE_BASE;
1636         tw32_f(MAC_MI_MODE, tp->mi_mode);
1637         udelay(80);
1638
1639         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1640
1641         /* Some third-party PHYs need to be reset on link going
1642          * down.
1643          */
1644         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1645              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1646              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1647             netif_carrier_ok(tp->dev)) {
1648                 tg3_readphy(tp, MII_BMSR, &bmsr);
1649                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1650                     !(bmsr & BMSR_LSTATUS))
1651                         force_reset = 1;
1652         }
1653         if (force_reset)
1654                 tg3_phy_reset(tp);
1655
1656         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1657                 tg3_readphy(tp, MII_BMSR, &bmsr);
1658                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1659                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1660                         bmsr = 0;
1661
1662                 if (!(bmsr & BMSR_LSTATUS)) {
1663                         err = tg3_init_5401phy_dsp(tp);
1664                         if (err)
1665                                 return err;
1666
1667                         tg3_readphy(tp, MII_BMSR, &bmsr);
1668                         for (i = 0; i < 1000; i++) {
1669                                 udelay(10);
1670                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1671                                     (bmsr & BMSR_LSTATUS)) {
1672                                         udelay(40);
1673                                         break;
1674                                 }
1675                         }
1676
1677                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1678                             !(bmsr & BMSR_LSTATUS) &&
1679                             tp->link_config.active_speed == SPEED_1000) {
1680                                 err = tg3_phy_reset(tp);
1681                                 if (!err)
1682                                         err = tg3_init_5401phy_dsp(tp);
1683                                 if (err)
1684                                         return err;
1685                         }
1686                 }
1687         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1688                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1689                 /* 5701 {A0,B0} CRC bug workaround */
1690                 tg3_writephy(tp, 0x15, 0x0a75);
1691                 tg3_writephy(tp, 0x1c, 0x8c68);
1692                 tg3_writephy(tp, 0x1c, 0x8d68);
1693                 tg3_writephy(tp, 0x1c, 0x8c68);
1694         }
1695
1696         /* Clear pending interrupts... */
1697         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1698         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1699
1700         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1701                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1702         else
1703                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1704
1705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1706             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1707                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1708                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1709                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1710                 else
1711                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1712         }
1713
1714         current_link_up = 0;
1715         current_speed = SPEED_INVALID;
1716         current_duplex = DUPLEX_INVALID;
1717
1718         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1719                 u32 val;
1720
1721                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1722                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1723                 if (!(val & (1 << 10))) {
1724                         val |= (1 << 10);
1725                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1726                         goto relink;
1727                 }
1728         }
1729
1730         bmsr = 0;
1731         for (i = 0; i < 100; i++) {
1732                 tg3_readphy(tp, MII_BMSR, &bmsr);
1733                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1734                     (bmsr & BMSR_LSTATUS))
1735                         break;
1736                 udelay(40);
1737         }
1738
1739         if (bmsr & BMSR_LSTATUS) {
1740                 u32 aux_stat, bmcr;
1741
1742                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1743                 for (i = 0; i < 2000; i++) {
1744                         udelay(10);
1745                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1746                             aux_stat)
1747                                 break;
1748                 }
1749
1750                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1751                                              &current_speed,
1752                                              &current_duplex);
1753
1754                 bmcr = 0;
1755                 for (i = 0; i < 200; i++) {
1756                         tg3_readphy(tp, MII_BMCR, &bmcr);
1757                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1758                                 continue;
1759                         if (bmcr && bmcr != 0x7fff)
1760                                 break;
1761                         udelay(10);
1762                 }
1763
1764                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1765                         if (bmcr & BMCR_ANENABLE) {
1766                                 current_link_up = 1;
1767
1768                                 /* Force autoneg restart if we are exiting
1769                                  * low power mode.
1770                                  */
1771                                 if (!tg3_copper_is_advertising_all(tp))
1772                                         current_link_up = 0;
1773                         } else {
1774                                 current_link_up = 0;
1775                         }
1776                 } else {
1777                         if (!(bmcr & BMCR_ANENABLE) &&
1778                             tp->link_config.speed == current_speed &&
1779                             tp->link_config.duplex == current_duplex) {
1780                                 current_link_up = 1;
1781                         } else {
1782                                 current_link_up = 0;
1783                         }
1784                 }
1785
1786                 tp->link_config.active_speed = current_speed;
1787                 tp->link_config.active_duplex = current_duplex;
1788         }
1789
1790         if (current_link_up == 1 &&
1791             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1792             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1793                 u32 local_adv, remote_adv;
1794
1795                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1796                         local_adv = 0;
1797                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1798
1799                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1800                         remote_adv = 0;
1801
1802                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1803
1804                 /* If we are not advertising full pause capability,
1805                  * something is wrong.  Bring the link down and reconfigure.
1806                  */
1807                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1808                         current_link_up = 0;
1809                 } else {
1810                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1811                 }
1812         }
1813 relink:
1814         if (current_link_up == 0) {
1815                 u32 tmp;
1816
1817                 tg3_phy_copper_begin(tp);
1818
1819                 tg3_readphy(tp, MII_BMSR, &tmp);
1820                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1821                     (tmp & BMSR_LSTATUS))
1822                         current_link_up = 1;
1823         }
1824
1825         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1826         if (current_link_up == 1) {
1827                 if (tp->link_config.active_speed == SPEED_100 ||
1828                     tp->link_config.active_speed == SPEED_10)
1829                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1830                 else
1831                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1832         } else
1833                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1834
1835         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1836         if (tp->link_config.active_duplex == DUPLEX_HALF)
1837                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1838
1839         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1840         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1841                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1842                     (current_link_up == 1 &&
1843                      tp->link_config.active_speed == SPEED_10))
1844                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1845         } else {
1846                 if (current_link_up == 1)
1847                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1848         }
1849
1850         /* ??? Without this setting Netgear GA302T PHY does not
1851          * ??? send/receive packets...
1852          */
1853         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1854             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1855                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1856                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1857                 udelay(80);
1858         }
1859
1860         tw32_f(MAC_MODE, tp->mac_mode);
1861         udelay(40);
1862
1863         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1864                 /* Polled via timer. */
1865                 tw32_f(MAC_EVENT, 0);
1866         } else {
1867                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1868         }
1869         udelay(40);
1870
1871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1872             current_link_up == 1 &&
1873             tp->link_config.active_speed == SPEED_1000 &&
1874             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1875              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1876                 udelay(120);
1877                 tw32_f(MAC_STATUS,
1878                      (MAC_STATUS_SYNC_CHANGED |
1879                       MAC_STATUS_CFG_CHANGED));
1880                 udelay(40);
1881                 tg3_write_mem(tp,
1882                               NIC_SRAM_FIRMWARE_MBOX,
1883                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1884         }
1885
1886         if (current_link_up != netif_carrier_ok(tp->dev)) {
1887                 if (current_link_up)
1888                         netif_carrier_on(tp->dev);
1889                 else
1890                         netif_carrier_off(tp->dev);
1891                 tg3_link_report(tp);
1892         }
1893
1894         return 0;
1895 }
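/* tg3_setup_copper_phy() roughly does the following: clear pending MAC
 * events/status and PHY interrupts, apply per-chip PHY workarounds
 * (5401 DSP init, 5701 A0/B0 CRC workaround), poll BMSR for link,
 * decode speed/duplex from the Broadcom AUX status register, then
 * decide whether the current link matches what was requested (autoneg
 * advertising everything, or the forced speed/duplex).  For full-duplex
 * autoneg links it also resolves flow control, dropping the link if
 * the local pause advertisement looks wrong.  If the link is not
 * acceptable, tg3_phy_copper_begin() reprograms the PHY.  Finally
 * MAC_MODE is set up for MII/GMII and half/full duplex, and the net
 * carrier state is updated.
 */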
1896
1897 struct tg3_fiber_aneginfo {
1898         int state;
1899 #define ANEG_STATE_UNKNOWN              0
1900 #define ANEG_STATE_AN_ENABLE            1
1901 #define ANEG_STATE_RESTART_INIT         2
1902 #define ANEG_STATE_RESTART              3
1903 #define ANEG_STATE_DISABLE_LINK_OK      4
1904 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1905 #define ANEG_STATE_ABILITY_DETECT       6
1906 #define ANEG_STATE_ACK_DETECT_INIT      7
1907 #define ANEG_STATE_ACK_DETECT           8
1908 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1909 #define ANEG_STATE_COMPLETE_ACK         10
1910 #define ANEG_STATE_IDLE_DETECT_INIT     11
1911 #define ANEG_STATE_IDLE_DETECT          12
1912 #define ANEG_STATE_LINK_OK              13
1913 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1914 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1915
1916         u32 flags;
1917 #define MR_AN_ENABLE            0x00000001
1918 #define MR_RESTART_AN           0x00000002
1919 #define MR_AN_COMPLETE          0x00000004
1920 #define MR_PAGE_RX              0x00000008
1921 #define MR_NP_LOADED            0x00000010
1922 #define MR_TOGGLE_TX            0x00000020
1923 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1924 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1925 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1926 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1927 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1928 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1929 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1930 #define MR_TOGGLE_RX            0x00002000
1931 #define MR_NP_RX                0x00004000
1932
1933 #define MR_LINK_OK              0x80000000
1934
1935         unsigned long link_time, cur_time;
1936
1937         u32 ability_match_cfg;
1938         int ability_match_count;
1939
1940         char ability_match, idle_match, ack_match;
1941
1942         u32 txconfig, rxconfig;
1943 #define ANEG_CFG_NP             0x00000080
1944 #define ANEG_CFG_ACK            0x00000040
1945 #define ANEG_CFG_RF2            0x00000020
1946 #define ANEG_CFG_RF1            0x00000010
1947 #define ANEG_CFG_PS2            0x00000001
1948 #define ANEG_CFG_PS1            0x00008000
1949 #define ANEG_CFG_HD             0x00004000
1950 #define ANEG_CFG_FD             0x00002000
1951 #define ANEG_CFG_INVAL          0x00001f06
1952
1953 };
1954 #define ANEG_OK         0
1955 #define ANEG_DONE       1
1956 #define ANEG_TIMER_ENAB 2
1957 #define ANEG_FAILED     -1
1958
1959 #define ANEG_STATE_SETTLE_TIME  10000
1960
1961 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1962                                    struct tg3_fiber_aneginfo *ap)
1963 {
1964         unsigned long delta;
1965         u32 rx_cfg_reg;
1966         int ret;
1967
1968         if (ap->state == ANEG_STATE_UNKNOWN) {
1969                 ap->rxconfig = 0;
1970                 ap->link_time = 0;
1971                 ap->cur_time = 0;
1972                 ap->ability_match_cfg = 0;
1973                 ap->ability_match_count = 0;
1974                 ap->ability_match = 0;
1975                 ap->idle_match = 0;
1976                 ap->ack_match = 0;
1977         }
1978         ap->cur_time++;
1979
1980         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1981                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1982
1983                 if (rx_cfg_reg != ap->ability_match_cfg) {
1984                         ap->ability_match_cfg = rx_cfg_reg;
1985                         ap->ability_match = 0;
1986                         ap->ability_match_count = 0;
1987                 } else {
1988                         if (++ap->ability_match_count > 1) {
1989                                 ap->ability_match = 1;
1990                                 ap->ability_match_cfg = rx_cfg_reg;
1991                         }
1992                 }
1993                 if (rx_cfg_reg & ANEG_CFG_ACK)
1994                         ap->ack_match = 1;
1995                 else
1996                         ap->ack_match = 0;
1997
1998                 ap->idle_match = 0;
1999         } else {
2000                 ap->idle_match = 1;
2001                 ap->ability_match_cfg = 0;
2002                 ap->ability_match_count = 0;
2003                 ap->ability_match = 0;
2004                 ap->ack_match = 0;
2005
2006                 rx_cfg_reg = 0;
2007         }
2008
2009         ap->rxconfig = rx_cfg_reg;
2010         ret = ANEG_OK;
2011
2012         switch(ap->state) {
2013         case ANEG_STATE_UNKNOWN:
2014                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2015                         ap->state = ANEG_STATE_AN_ENABLE;
2016
2017                 /* fallthru */
2018         case ANEG_STATE_AN_ENABLE:
2019                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2020                 if (ap->flags & MR_AN_ENABLE) {
2021                         ap->link_time = 0;
2022                         ap->cur_time = 0;
2023                         ap->ability_match_cfg = 0;
2024                         ap->ability_match_count = 0;
2025                         ap->ability_match = 0;
2026                         ap->idle_match = 0;
2027                         ap->ack_match = 0;
2028
2029                         ap->state = ANEG_STATE_RESTART_INIT;
2030                 } else {
2031                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2032                 }
2033                 break;
2034
2035         case ANEG_STATE_RESTART_INIT:
2036                 ap->link_time = ap->cur_time;
2037                 ap->flags &= ~(MR_NP_LOADED);
2038                 ap->txconfig = 0;
2039                 tw32(MAC_TX_AUTO_NEG, 0);
2040                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2041                 tw32_f(MAC_MODE, tp->mac_mode);
2042                 udelay(40);
2043
2044                 ret = ANEG_TIMER_ENAB;
2045                 ap->state = ANEG_STATE_RESTART;
2046
2047                 /* fallthru */
2048         case ANEG_STATE_RESTART:
2049                 delta = ap->cur_time - ap->link_time;
2050                 if (delta > ANEG_STATE_SETTLE_TIME) {
2051                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2052                 } else {
2053                         ret = ANEG_TIMER_ENAB;
2054                 }
2055                 break;
2056
2057         case ANEG_STATE_DISABLE_LINK_OK:
2058                 ret = ANEG_DONE;
2059                 break;
2060
2061         case ANEG_STATE_ABILITY_DETECT_INIT:
2062                 ap->flags &= ~(MR_TOGGLE_TX);
2063                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2064                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2065                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2066                 tw32_f(MAC_MODE, tp->mac_mode);
2067                 udelay(40);
2068
2069                 ap->state = ANEG_STATE_ABILITY_DETECT;
2070                 break;
2071
2072         case ANEG_STATE_ABILITY_DETECT:
2073                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2074                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2075                 }
2076                 break;
2077
2078         case ANEG_STATE_ACK_DETECT_INIT:
2079                 ap->txconfig |= ANEG_CFG_ACK;
2080                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2081                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2082                 tw32_f(MAC_MODE, tp->mac_mode);
2083                 udelay(40);
2084
2085                 ap->state = ANEG_STATE_ACK_DETECT;
2086
2087                 /* fallthru */
2088         case ANEG_STATE_ACK_DETECT:
2089                 if (ap->ack_match != 0) {
2090                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2091                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2092                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2093                         } else {
2094                                 ap->state = ANEG_STATE_AN_ENABLE;
2095                         }
2096                 } else if (ap->ability_match != 0 &&
2097                            ap->rxconfig == 0) {
2098                         ap->state = ANEG_STATE_AN_ENABLE;
2099                 }
2100                 break;
2101
2102         case ANEG_STATE_COMPLETE_ACK_INIT:
2103                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2104                         ret = ANEG_FAILED;
2105                         break;
2106                 }
2107                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2108                                MR_LP_ADV_HALF_DUPLEX |
2109                                MR_LP_ADV_SYM_PAUSE |
2110                                MR_LP_ADV_ASYM_PAUSE |
2111                                MR_LP_ADV_REMOTE_FAULT1 |
2112                                MR_LP_ADV_REMOTE_FAULT2 |
2113                                MR_LP_ADV_NEXT_PAGE |
2114                                MR_TOGGLE_RX |
2115                                MR_NP_RX);
2116                 if (ap->rxconfig & ANEG_CFG_FD)
2117                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2118                 if (ap->rxconfig & ANEG_CFG_HD)
2119                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2120                 if (ap->rxconfig & ANEG_CFG_PS1)
2121                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2122                 if (ap->rxconfig & ANEG_CFG_PS2)
2123                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2124                 if (ap->rxconfig & ANEG_CFG_RF1)
2125                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2126                 if (ap->rxconfig & ANEG_CFG_RF2)
2127                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2128                 if (ap->rxconfig & ANEG_CFG_NP)
2129                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2130
2131                 ap->link_time = ap->cur_time;
2132
2133                 ap->flags ^= (MR_TOGGLE_TX);
2134                 if (ap->rxconfig & 0x0008)
2135                         ap->flags |= MR_TOGGLE_RX;
2136                 if (ap->rxconfig & ANEG_CFG_NP)
2137                         ap->flags |= MR_NP_RX;
2138                 ap->flags |= MR_PAGE_RX;
2139
2140                 ap->state = ANEG_STATE_COMPLETE_ACK;
2141                 ret = ANEG_TIMER_ENAB;
2142                 break;
2143
2144         case ANEG_STATE_COMPLETE_ACK:
2145                 if (ap->ability_match != 0 &&
2146                     ap->rxconfig == 0) {
2147                         ap->state = ANEG_STATE_AN_ENABLE;
2148                         break;
2149                 }
2150                 delta = ap->cur_time - ap->link_time;
2151                 if (delta > ANEG_STATE_SETTLE_TIME) {
2152                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2153                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2154                         } else {
2155                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2156                                     !(ap->flags & MR_NP_RX)) {
2157                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2158                                 } else {
2159                                         ret = ANEG_FAILED;
2160                                 }
2161                         }
2162                 }
2163                 break;
2164
2165         case ANEG_STATE_IDLE_DETECT_INIT:
2166                 ap->link_time = ap->cur_time;
2167                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2168                 tw32_f(MAC_MODE, tp->mac_mode);
2169                 udelay(40);
2170
2171                 ap->state = ANEG_STATE_IDLE_DETECT;
2172                 ret = ANEG_TIMER_ENAB;
2173                 break;
2174
2175         case ANEG_STATE_IDLE_DETECT:
2176                 if (ap->ability_match != 0 &&
2177                     ap->rxconfig == 0) {
2178                         ap->state = ANEG_STATE_AN_ENABLE;
2179                         break;
2180                 }
2181                 delta = ap->cur_time - ap->link_time;
2182                 if (delta > ANEG_STATE_SETTLE_TIME) {
2183                         /* XXX another gem from the Broadcom driver :( */
2184                         ap->state = ANEG_STATE_LINK_OK;
2185                 }
2186                 break;
2187
2188         case ANEG_STATE_LINK_OK:
2189                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2190                 ret = ANEG_DONE;
2191                 break;
2192
2193         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2194                 /* ??? unimplemented */
2195                 break;
2196
2197         case ANEG_STATE_NEXT_PAGE_WAIT:
2198                 /* ??? unimplemented */
2199                 break;
2200
2201         default:
2202                 ret = ANEG_FAILED;
2203                 break;
2204         };
2205
2206         return ret;
2207 }
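/* The state machine above runs the fiber (1000BASE-X style) autoneg
 * arbitration in software, driven by the MAC_RX_AUTO_NEG /
 * MAC_TX_AUTO_NEG config words.  It returns ANEG_OK to keep running,
 * ANEG_TIMER_ENAB while waiting out a settle interval, ANEG_DONE on
 * completion and ANEG_FAILED on error; the caller is expected to keep
 * stepping it (see fiber_autoneg() below).
 */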
2208
2209 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2210 {
2211         int res = 0;
2212         struct tg3_fiber_aneginfo aninfo;
2213         int status = ANEG_FAILED;
2214         unsigned int tick;
2215         u32 tmp;
2216
2217         tw32_f(MAC_TX_AUTO_NEG, 0);
2218
2219         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2220         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2221         udelay(40);
2222
2223         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2224         udelay(40);
2225
2226         memset(&aninfo, 0, sizeof(aninfo));
2227         aninfo.flags |= MR_AN_ENABLE;
2228         aninfo.state = ANEG_STATE_UNKNOWN;
2229         aninfo.cur_time = 0;
2230         tick = 0;
2231         while (++tick < 195000) {
2232                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2233                 if (status == ANEG_DONE || status == ANEG_FAILED)
2234                         break;
2235
2236                 udelay(1);
2237         }
2238
2239         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2240         tw32_f(MAC_MODE, tp->mac_mode);
2241         udelay(40);
2242
2243         *flags = aninfo.flags;
2244
2245         if (status == ANEG_DONE &&
2246             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2247                              MR_LP_ADV_FULL_DUPLEX)))
2248                 res = 1;
2249
2250         return res;
2251 }
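/* fiber_autoneg() forces the MAC into GMII mode with SEND_CONFIGS set,
 * then single-steps the state machine above roughly once per
 * microsecond for up to ~195ms (195000 iterations of udelay(1)).  It
 * reports success only when the machine finishes with ANEG_DONE and at
 * least one of MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is
 * set in the resulting flags.
 */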
2252
2253 static void tg3_init_bcm8002(struct tg3 *tp)
2254 {
2255         u32 mac_status = tr32(MAC_STATUS);
2256         int i;
2257
2258         /* Reset when initting for the first time or when we have a link. */
2259         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2260             !(mac_status & MAC_STATUS_PCS_SYNCED))
2261                 return;
2262
2263         /* Set PLL lock range. */
2264         tg3_writephy(tp, 0x16, 0x8007);
2265
2266         /* SW reset */
2267         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2268
2269         /* Wait for reset to complete. */
2270         /* XXX schedule_timeout() ... */
2271         for (i = 0; i < 500; i++)
2272                 udelay(10);
2273
2274         /* Config mode; select PMA/Ch 1 regs. */
2275         tg3_writephy(tp, 0x10, 0x8411);
2276
2277         /* Enable auto-lock and comdet, select txclk for tx. */
2278         tg3_writephy(tp, 0x11, 0x0a10);
2279
2280         tg3_writephy(tp, 0x18, 0x00a0);
2281         tg3_writephy(tp, 0x16, 0x41ff);
2282
2283         /* Assert and deassert POR. */
2284         tg3_writephy(tp, 0x13, 0x0400);
2285         udelay(40);
2286         tg3_writephy(tp, 0x13, 0x0000);
2287
2288         tg3_writephy(tp, 0x11, 0x0a50);
2289         udelay(40);
2290         tg3_writephy(tp, 0x11, 0x0a10);
2291
2292         /* Wait for signal to stabilize */
2293         /* XXX schedule_timeout() ... */
2294         for (i = 0; i < 15000; i++)
2295                 udelay(10);
2296
2297         /* Deselect the channel register so we can read the PHYID
2298          * later.
2299          */
2300         tg3_writephy(tp, 0x10, 0x8011);
2301 }
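/* The BCM8002 bring-up above is a fixed register recipe: set the PLL
 * lock range, soft-reset the PHY, select the PMA/channel-1 register
 * page, enable auto-lock/comdet, pulse POR, and finally deselect the
 * channel page so the PHY ID can be read later.  The two busy-wait
 * loops (5ms after reset, 150ms for signal stabilization) stand in for
 * a proper schedule_timeout(), as the XXX comments note.
 */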
2302
2303 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2304 {
2305         u32 sg_dig_ctrl, sg_dig_status;
2306         u32 serdes_cfg, expected_sg_dig_ctrl;
2307         int workaround, port_a;
2308         int current_link_up;
2309
2310         serdes_cfg = 0;
2311         expected_sg_dig_ctrl = 0;
2312         workaround = 0;
2313         port_a = 1;
2314         current_link_up = 0;
2315
2316         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2317             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2318                 workaround = 1;
2319                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2320                         port_a = 0;
2321
2322                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2323                 /* preserve bits 20-23 for voltage regulator */
2324                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2325         }
2326
2327         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2328
2329         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2330                 if (sg_dig_ctrl & (1 << 31)) {
2331                         if (workaround) {
2332                                 u32 val = serdes_cfg;
2333
2334                                 if (port_a)
2335                                         val |= 0xc010000;
2336                                 else
2337                                         val |= 0x4010000;
2338                                 tw32_f(MAC_SERDES_CFG, val);
2339                         }
2340                         tw32_f(SG_DIG_CTRL, 0x01388400);
2341                 }
2342                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2343                         tg3_setup_flow_control(tp, 0, 0);
2344                         current_link_up = 1;
2345                 }
2346                 goto out;
2347         }
2348
2349         /* Want auto-negotiation.  */
2350         expected_sg_dig_ctrl = 0x81388400;
2351
2352         /* Pause capability */
2353         expected_sg_dig_ctrl |= (1 << 11);
2354
2355         /* Asymmetric pause */
2356         expected_sg_dig_ctrl |= (1 << 12);
2357
2358         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2359                 if (workaround)
2360                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2361                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2362                 udelay(5);
2363                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2364
2365                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2366         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2367                                  MAC_STATUS_SIGNAL_DET)) {
2368                 int i;
2369
2370                 /* Give it time to negotiate (~200ms) */
2371                 for (i = 0; i < 40000; i++) {
2372                         sg_dig_status = tr32(SG_DIG_STATUS);
2373                         if (sg_dig_status & (0x3))
2374                                 break;
2375                         udelay(5);
2376                 }
2377                 mac_status = tr32(MAC_STATUS);
2378
2379                 if ((sg_dig_status & (1 << 1)) &&
2380                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2381                         u32 local_adv, remote_adv;
2382
2383                         local_adv = ADVERTISE_PAUSE_CAP;
2384                         remote_adv = 0;
2385                         if (sg_dig_status & (1 << 19))
2386                                 remote_adv |= LPA_PAUSE_CAP;
2387                         if (sg_dig_status & (1 << 20))
2388                                 remote_adv |= LPA_PAUSE_ASYM;
2389
2390                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2391                         current_link_up = 1;
2392                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2393                 } else if (!(sg_dig_status & (1 << 1))) {
2394                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2395                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2396                         else {
2397                                 if (workaround) {
2398                                         u32 val = serdes_cfg;
2399
2400                                         if (port_a)
2401                                                 val |= 0xc010000;
2402                                         else
2403                                                 val |= 0x4010000;
2404
2405                                         tw32_f(MAC_SERDES_CFG, val);
2406                                 }
2407
2408                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2409                                 udelay(40);
2410
2411                                 /* Link parallel detection - link is up
2412                                  * only if we have PCS_SYNC and not
2413                                  * receiving config code words.  */
2414                                 mac_status = tr32(MAC_STATUS);
2415                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2416                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2417                                         tg3_setup_flow_control(tp, 0, 0);
2418                                         current_link_up = 1;
2419                                 }
2420                         }
2421                 }
2422         }
2423
2424 out:
2425         return current_link_up;
2426 }
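/* Notes on the hardware SERDES autoneg path above: the driver wants
 * SG_DIG_CTRL to read 0x81388400 plus the pause (bit 11) and
 * asymmetric-pause (bit 12) ability bits; when it has to reprogram the
 * register it briefly pulses bit 30 as well.  Once the expected value
 * is in place and the link is synced, it polls SG_DIG_STATUS for up to
 * ~200ms and, if bit 1 is set together with PCS sync, translates
 * status bits 19/20 into LPA pause bits for tg3_setup_flow_control().
 * Otherwise it falls back to parallel detection: the link is declared
 * up only with PCS sync and no incoming config code words.
 */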
2427
2428 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2429 {
2430         int current_link_up = 0;
2431
2432         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2433                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2434                 goto out;
2435         }
2436
2437         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2438                 u32 flags;
2439                 int i;
2440
2441                 if (fiber_autoneg(tp, &flags)) {
2442                         u32 local_adv, remote_adv;
2443
2444                         local_adv = ADVERTISE_PAUSE_CAP;
2445                         remote_adv = 0;
2446                         if (flags & MR_LP_ADV_SYM_PAUSE)
2447                                 remote_adv |= LPA_PAUSE_CAP;
2448                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2449                                 remote_adv |= LPA_PAUSE_ASYM;
2450
2451                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2452
2453                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2454                         current_link_up = 1;
2455                 }
2456                 for (i = 0; i < 30; i++) {
2457                         udelay(20);
2458                         tw32_f(MAC_STATUS,
2459                                (MAC_STATUS_SYNC_CHANGED |
2460                                 MAC_STATUS_CFG_CHANGED));
2461                         udelay(40);
2462                         if ((tr32(MAC_STATUS) &
2463                              (MAC_STATUS_SYNC_CHANGED |
2464                               MAC_STATUS_CFG_CHANGED)) == 0)
2465                                 break;
2466                 }
2467
2468                 mac_status = tr32(MAC_STATUS);
2469                 if (current_link_up == 0 &&
2470                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2471                     !(mac_status & MAC_STATUS_RCVD_CFG))
2472                         current_link_up = 1;
2473         } else {
2474                 /* Forcing 1000FD link up. */
2475                 current_link_up = 1;
2476                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2477
2478                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2479                 udelay(40);
2480         }
2481
2482 out:
2483         return current_link_up;
2484 }
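/* In the "by hand" case above, autoneg is run entirely in software via
 * fiber_autoneg(); the MR_LP_ADV_* pause flags it returns are mapped
 * onto LPA_PAUSE_CAP / LPA_PAUSE_ASYM before calling
 * tg3_setup_flow_control().  If software autoneg fails but PCS sync is
 * present with no config code words being received, the link is still
 * accepted (parallel detection).  With autoneg disabled the code just
 * forces a 1000FD link and writes MAC_MODE with SEND_CONFIGS asserted.
 */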
2485
2486 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2487 {
2488         u32 orig_pause_cfg;
2489         u16 orig_active_speed;
2490         u8 orig_active_duplex;
2491         u32 mac_status;
2492         int current_link_up;
2493         int i;
2494
2495         orig_pause_cfg =
2496                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2497                                   TG3_FLAG_TX_PAUSE));
2498         orig_active_speed = tp->link_config.active_speed;
2499         orig_active_duplex = tp->link_config.active_duplex;
2500
2501         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2502             netif_carrier_ok(tp->dev) &&
2503             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2504                 mac_status = tr32(MAC_STATUS);
2505                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2506                                MAC_STATUS_SIGNAL_DET |
2507                                MAC_STATUS_CFG_CHANGED |
2508                                MAC_STATUS_RCVD_CFG);
2509                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2510                                    MAC_STATUS_SIGNAL_DET)) {
2511                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2512                                             MAC_STATUS_CFG_CHANGED));
2513                         return 0;
2514                 }
2515         }
2516
2517         tw32_f(MAC_TX_AUTO_NEG, 0);
2518
2519         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2520         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2521         tw32_f(MAC_MODE, tp->mac_mode);
2522         udelay(40);
2523
2524         if (tp->phy_id == PHY_ID_BCM8002)
2525                 tg3_init_bcm8002(tp);
2526
2527         /* Enable link change event even when serdes polling.  */
2528         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2529         udelay(40);
2530
2531         current_link_up = 0;
2532         mac_status = tr32(MAC_STATUS);
2533
2534         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2535                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2536         else
2537                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2538
2539         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2540         tw32_f(MAC_MODE, tp->mac_mode);
2541         udelay(40);
2542
2543         tp->hw_status->status =
2544                 (SD_STATUS_UPDATED |
2545                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2546
2547         for (i = 0; i < 100; i++) {
2548                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2549                                     MAC_STATUS_CFG_CHANGED));
2550                 udelay(5);
2551                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2552                                          MAC_STATUS_CFG_CHANGED)) == 0)
2553                         break;
2554         }
2555
2556         mac_status = tr32(MAC_STATUS);
2557         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2558                 current_link_up = 0;
2559                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2560                         tw32_f(MAC_MODE, (tp->mac_mode |
2561                                           MAC_MODE_SEND_CONFIGS));
2562                         udelay(1);
2563                         tw32_f(MAC_MODE, tp->mac_mode);
2564                 }
2565         }
2566
2567         if (current_link_up == 1) {
2568                 tp->link_config.active_speed = SPEED_1000;
2569                 tp->link_config.active_duplex = DUPLEX_FULL;
2570                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2571                                     LED_CTRL_LNKLED_OVERRIDE |
2572                                     LED_CTRL_1000MBPS_ON));
2573         } else {
2574                 tp->link_config.active_speed = SPEED_INVALID;
2575                 tp->link_config.active_duplex = DUPLEX_INVALID;
2576                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2577                                     LED_CTRL_LNKLED_OVERRIDE |
2578                                     LED_CTRL_TRAFFIC_OVERRIDE));
2579         }
2580
2581         if (current_link_up != netif_carrier_ok(tp->dev)) {
2582                 if (current_link_up)
2583                         netif_carrier_on(tp->dev);
2584                 else
2585                         netif_carrier_off(tp->dev);
2586                 tg3_link_report(tp);
2587         } else {
2588                 u32 now_pause_cfg =
2589                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2590                                          TG3_FLAG_TX_PAUSE);
2591                 if (orig_pause_cfg != now_pause_cfg ||
2592                     orig_active_speed != tp->link_config.active_speed ||
2593                     orig_active_duplex != tp->link_config.active_duplex)
2594                         tg3_link_report(tp);
2595         }
2596
2597         return 0;
2598 }
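/* tg3_setup_fiber_phy() in outline: if nothing changed on an already
 * up, fully initialized link, it just acknowledges the status bits and
 * returns early.  Otherwise it switches the MAC to TBI mode, runs the
 * BCM8002 init when that PHY is present, and lets either the hardware
 * (SG_DIG) path or the software state machine negotiate the link.  A
 * link is always reported as 1000/full; the LED override and carrier
 * state are updated, and tg3_link_report() is called when the carrier,
 * pause configuration, speed or duplex changed.
 */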
2599
2600 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2601 {
2602         int current_link_up, err = 0;
2603         u32 bmsr, bmcr;
2604         u16 current_speed;
2605         u8 current_duplex;
2606
2607         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2608         tw32_f(MAC_MODE, tp->mac_mode);
2609         udelay(40);
2610
2611         tw32(MAC_EVENT, 0);
2612
2613         tw32_f(MAC_STATUS,
2614              (MAC_STATUS_SYNC_CHANGED |
2615               MAC_STATUS_CFG_CHANGED |
2616               MAC_STATUS_MI_COMPLETION |
2617               MAC_STATUS_LNKSTATE_CHANGED));
2618         udelay(40);
2619
2620         if (force_reset)
2621                 tg3_phy_reset(tp);
2622
2623         current_link_up = 0;
2624         current_speed = SPEED_INVALID;
2625         current_duplex = DUPLEX_INVALID;
2626
2627         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2628         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2629
2630         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2631
2632         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2633             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2634                 /* do nothing, just check for link up at the end */
2635         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2636                 u32 adv, new_adv;
2637
2638                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2639                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2640                                   ADVERTISE_1000XPAUSE |
2641                                   ADVERTISE_1000XPSE_ASYM |
2642                                   ADVERTISE_SLCT);
2643
2644                 /* Always advertise symmetric PAUSE just like copper */
2645                 new_adv |= ADVERTISE_1000XPAUSE;
2646
2647                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2648                         new_adv |= ADVERTISE_1000XHALF;
2649                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2650                         new_adv |= ADVERTISE_1000XFULL;
2651
2652                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2653                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2654                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2655                         tg3_writephy(tp, MII_BMCR, bmcr);
2656
2657                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2658                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2659                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2660
2661                         return err;
2662                 }
2663         } else {
2664                 u32 new_bmcr;
2665
2666                 bmcr &= ~BMCR_SPEED1000;
2667                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2668
2669                 if (tp->link_config.duplex == DUPLEX_FULL)
2670                         new_bmcr |= BMCR_FULLDPLX;
2671
2672                 if (new_bmcr != bmcr) {
2673                         /* BMCR_SPEED1000 is a reserved bit that needs
2674                          * to be set on write.
2675                          */
2676                         new_bmcr |= BMCR_SPEED1000;
2677
2678                         /* Force a linkdown */
2679                         if (netif_carrier_ok(tp->dev)) {
2680                                 u32 adv;
2681
2682                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2683                                 adv &= ~(ADVERTISE_1000XFULL |
2684                                          ADVERTISE_1000XHALF |
2685                                          ADVERTISE_SLCT);
2686                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2687                                 tg3_writephy(tp, MII_BMCR, bmcr |
2688                                                            BMCR_ANRESTART |
2689                                                            BMCR_ANENABLE);
2690                                 udelay(10);
2691                                 netif_carrier_off(tp->dev);
2692                         }
2693                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2694                         bmcr = new_bmcr;
2695                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2696                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2698                 }
2699         }
2700
2701         if (bmsr & BMSR_LSTATUS) {
2702                 current_speed = SPEED_1000;
2703                 current_link_up = 1;
2704                 if (bmcr & BMCR_FULLDPLX)
2705                         current_duplex = DUPLEX_FULL;
2706                 else
2707                         current_duplex = DUPLEX_HALF;
2708
2709                 if (bmcr & BMCR_ANENABLE) {
2710                         u32 local_adv, remote_adv, common;
2711
2712                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2713                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2714                         common = local_adv & remote_adv;
2715                         if (common & (ADVERTISE_1000XHALF |
2716                                       ADVERTISE_1000XFULL)) {
2717                                 if (common & ADVERTISE_1000XFULL)
2718                                         current_duplex = DUPLEX_FULL;
2719                                 else
2720                                         current_duplex = DUPLEX_HALF;
2721
2722                                 tg3_setup_flow_control(tp, local_adv,
2723                                                        remote_adv);
2724                         }
2725                         else
2726                                 current_link_up = 0;
2727                 }
2728         }
2729
2730         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2731         if (tp->link_config.active_duplex == DUPLEX_HALF)
2732                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2733
2734         tw32_f(MAC_MODE, tp->mac_mode);
2735         udelay(40);
2736
2737         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2738
2739         tp->link_config.active_speed = current_speed;
2740         tp->link_config.active_duplex = current_duplex;
2741
2742         if (current_link_up != netif_carrier_ok(tp->dev)) {
2743                 if (current_link_up)
2744                         netif_carrier_on(tp->dev);
2745                 else {
2746                         netif_carrier_off(tp->dev);
2747                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2748                 }
2749                 tg3_link_report(tp);
2750         }
2751         return err;
2752 }
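/* The MII-SERDES variant above talks to the SERDES through ordinary
 * MII registers, using the 1000X bit definitions in BMCR/BMSR and the
 * advertisement registers.  With autoneg enabled it rewrites the 1000X
 * advertisement (always including symmetric pause) and restarts
 * autoneg if anything changed; with autoneg disabled it forces the
 * requested duplex, dropping the link first if it was up.  Link, speed
 * (always 1000) and duplex are then taken from BMSR/BMCR, and flow
 * control is resolved from the 1000X pause bits common to both sides.
 */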
2753
2754 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2755 {
2756         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2757                 /* Give autoneg time to complete. */
2758                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2759                 return;
2760         }
2761         if (!netif_carrier_ok(tp->dev) &&
2762             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2763                 u32 bmcr;
2764
2765                 tg3_readphy(tp, MII_BMCR, &bmcr);
2766                 if (bmcr & BMCR_ANENABLE) {
2767                         u32 phy1, phy2;
2768
2769                         /* Select shadow register 0x1f */
2770                         tg3_writephy(tp, 0x1c, 0x7c00);
2771                         tg3_readphy(tp, 0x1c, &phy1);
2772
2773                         /* Select expansion interrupt status register */
2774                         tg3_writephy(tp, 0x17, 0x0f01);
2775                         tg3_readphy(tp, 0x15, &phy2);
2776                         tg3_readphy(tp, 0x15, &phy2);
2777
2778                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2779                                 /* We have signal detect and are not receiving
2780                                  * config code words; the link is up by parallel
2781                                  * detection.
2782                                  */
2783
2784                                 bmcr &= ~BMCR_ANENABLE;
2785                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2786                                 tg3_writephy(tp, MII_BMCR, bmcr);
2787                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2788                         }
2789                 }
2790         }
2791         else if (netif_carrier_ok(tp->dev) &&
2792                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2793                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2794                 u32 phy2;
2795
2796                 /* Select expansion interrupt status register */
2797                 tg3_writephy(tp, 0x17, 0x0f01);
2798                 tg3_readphy(tp, 0x15, &phy2);
2799                 if (phy2 & 0x20) {
2800                         u32 bmcr;
2801
2802                         /* Config code words received, turn on autoneg. */
2803                         tg3_readphy(tp, MII_BMCR, &bmcr);
2804                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2805
2806                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2807
2808                 }
2809         }
2810 }
2811
2812 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2813 {
2814         int err;
2815
2816         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2817                 err = tg3_setup_fiber_phy(tp, force_reset);
2818         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2819                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2820         } else {
2821                 err = tg3_setup_copper_phy(tp, force_reset);
2822         }
2823
2824         if (tp->link_config.active_speed == SPEED_1000 &&
2825             tp->link_config.active_duplex == DUPLEX_HALF)
2826                 tw32(MAC_TX_LENGTHS,
2827                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2828                       (6 << TX_LENGTHS_IPG_SHIFT) |
2829                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2830         else
2831                 tw32(MAC_TX_LENGTHS,
2832                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2833                       (6 << TX_LENGTHS_IPG_SHIFT) |
2834                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2835
2836         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2837                 if (netif_carrier_ok(tp->dev)) {
2838                         tw32(HOSTCC_STAT_COAL_TICKS,
2839                              tp->coal.stats_block_coalesce_usecs);
2840                 } else {
2841                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2842                 }
2843         }
2844
2845         return err;
2846 }
2847
2848 /* Tigon3 never reports partial packet sends.  So we do not
2849  * need special logic to handle SKBs that have not had all
2850  * of their frags sent yet, like SunGEM does.
2851  */
2852 static void tg3_tx(struct tg3 *tp)
2853 {
2854         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2855         u32 sw_idx = tp->tx_cons;
2856
2857         while (sw_idx != hw_idx) {
2858                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2859                 struct sk_buff *skb = ri->skb;
2860                 int i;
2861
2862                 if (unlikely(skb == NULL))
2863                         BUG();
2864
2865                 pci_unmap_single(tp->pdev,
2866                                  pci_unmap_addr(ri, mapping),
2867                                  skb_headlen(skb),
2868                                  PCI_DMA_TODEVICE);
2869
2870                 ri->skb = NULL;
2871
2872                 sw_idx = NEXT_TX(sw_idx);
2873
2874                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2875                         if (unlikely(sw_idx == hw_idx))
2876                                 BUG();
2877
2878                         ri = &tp->tx_buffers[sw_idx];
2879                         if (unlikely(ri->skb != NULL))
2880                                 BUG();
2881
2882                         pci_unmap_page(tp->pdev,
2883                                        pci_unmap_addr(ri, mapping),
2884                                        skb_shinfo(skb)->frags[i].size,
2885                                        PCI_DMA_TODEVICE);
2886
2887                         sw_idx = NEXT_TX(sw_idx);
2888                 }
2889
2890                 dev_kfree_skb(skb);
2891         }
2892
2893         tp->tx_cons = sw_idx;
2894
2895         if (unlikely(netif_queue_stopped(tp->dev))) {
2896                 spin_lock(&tp->tx_lock);
2897                 if (netif_queue_stopped(tp->dev) &&
2898                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2899                         netif_wake_queue(tp->dev);
2900                 spin_unlock(&tp->tx_lock);
2901         }
2902 }
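
/* For illustration: if hw_idx has advanced from, say, 10 to 14, the loop
 * above walks descriptors 10..13, unmapping the head buffer and any
 * fragment pages of each completed SKB before freeing it, and leaves
 * tp->tx_cons at 14.  The queue is only woken once TX_BUFFS_AVAIL()
 * climbs back above TG3_TX_WAKEUP_THRESH, so a nearly full ring does
 * not bounce between stopped and awake on every completion.
 */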
2903
2904 /* Returns size of skb allocated or < 0 on error.
2905  *
2906  * We only need to fill in the address because the other members
2907  * of the RX descriptor are invariant, see tg3_init_rings.
2908  *
2909  * Note the purposeful asymmetry of CPU vs. chip accesses.  For
2910  * posting buffers we only dirty the first cache line of the RX
2911  * descriptor (containing the address).  Whereas for the RX status
2912  * buffers the CPU only reads the last cache line of the RX descriptor
2913  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2914  */
2915 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2916                             int src_idx, u32 dest_idx_unmasked)
2917 {
2918         struct tg3_rx_buffer_desc *desc;
2919         struct ring_info *map, *src_map;
2920         struct sk_buff *skb;
2921         dma_addr_t mapping;
2922         int skb_size, dest_idx;
2923
2924         src_map = NULL;
2925         switch (opaque_key) {
2926         case RXD_OPAQUE_RING_STD:
2927                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2928                 desc = &tp->rx_std[dest_idx];
2929                 map = &tp->rx_std_buffers[dest_idx];
2930                 if (src_idx >= 0)
2931                         src_map = &tp->rx_std_buffers[src_idx];
2932                 skb_size = tp->rx_pkt_buf_sz;
2933                 break;
2934
2935         case RXD_OPAQUE_RING_JUMBO:
2936                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2937                 desc = &tp->rx_jumbo[dest_idx];
2938                 map = &tp->rx_jumbo_buffers[dest_idx];
2939                 if (src_idx >= 0)
2940                         src_map = &tp->rx_jumbo_buffers[src_idx];
2941                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2942                 break;
2943
2944         default:
2945                 return -EINVAL;
2946         }
2947
2948         /* Do not overwrite any of the map or rp information
2949          * until we are sure we can commit to a new buffer.
2950          *
2951          * Callers depend upon this behavior and assume that
2952          * we leave everything unchanged if we fail.
2953          */
2954         skb = dev_alloc_skb(skb_size);
2955         if (skb == NULL)
2956                 return -ENOMEM;
2957
2958         skb->dev = tp->dev;
2959         skb_reserve(skb, tp->rx_offset);
2960
2961         mapping = pci_map_single(tp->pdev, skb->data,
2962                                  skb_size - tp->rx_offset,
2963                                  PCI_DMA_FROMDEVICE);
2964
2965         map->skb = skb;
2966         pci_unmap_addr_set(map, mapping, mapping);
2967
2968         if (src_map != NULL)
2969                 src_map->skb = NULL;
2970
2971         desc->addr_hi = ((u64)mapping >> 32);
2972         desc->addr_lo = ((u64)mapping & 0xffffffff);
2973
2974         return skb_size;
2975 }
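
/* For illustration: the 64-bit DMA address is split across the two 32-bit
 * descriptor words, e.g. a mapping of 0x0000000123456789 becomes
 * addr_hi = 0x00000001 and addr_lo = 0x23456789.  Only this first part of
 * the descriptor is dirtied here; the invariant fields set up in
 * tg3_init_rings() are left untouched, which preserves the cpu/chip
 * cache line split described above.
 */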
2976
2977 /* We only need to move over in the address because the other
2978  * members of the RX descriptor are invariant.  See notes above
2979  * tg3_alloc_rx_skb for full details.
2980  */
2981 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2982                            int src_idx, u32 dest_idx_unmasked)
2983 {
2984         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2985         struct ring_info *src_map, *dest_map;
2986         int dest_idx;
2987
2988         switch (opaque_key) {
2989         case RXD_OPAQUE_RING_STD:
2990                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2991                 dest_desc = &tp->rx_std[dest_idx];
2992                 dest_map = &tp->rx_std_buffers[dest_idx];
2993                 src_desc = &tp->rx_std[src_idx];
2994                 src_map = &tp->rx_std_buffers[src_idx];
2995                 break;
2996
2997         case RXD_OPAQUE_RING_JUMBO:
2998                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2999                 dest_desc = &tp->rx_jumbo[dest_idx];
3000                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3001                 src_desc = &tp->rx_jumbo[src_idx];
3002                 src_map = &tp->rx_jumbo_buffers[src_idx];
3003                 break;
3004
3005         default:
3006                 return;
3007         }
3008
3009         dest_map->skb = src_map->skb;
3010         pci_unmap_addr_set(dest_map, mapping,
3011                            pci_unmap_addr(src_map, mapping));
3012         dest_desc->addr_hi = src_desc->addr_hi;
3013         dest_desc->addr_lo = src_desc->addr_lo;
3014
3015         src_map->skb = NULL;
3016 }
3017
3018 #if TG3_VLAN_TAG_USED
3019 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3020 {
3021         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3022 }
3023 #endif
3024
3025 /* The RX ring scheme is composed of multiple rings which post fresh
3026  * buffers to the chip, and one special ring the chip uses to report
3027  * status back to the host.
3028  *
3029  * The special ring reports the status of received packets to the
3030  * host.  The chip does not write into the original descriptor the
3031  * RX buffer was obtained from.  The chip simply takes the original
3032  * descriptor as provided by the host, updates the status and length
3033  * field, then writes this into the next status ring entry.
3034  *
3035  * Each ring the host uses to post buffers to the chip is described
3036  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3037  * it is first placed into the on-chip RAM.  When the packet's length
3038  * is known, the chip walks down the TG3_BDINFO entries to select a ring.
3039  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3040  * whose MAXLEN covers the new packet's length is chosen.
3041  *
3042  * The "separate ring for rx status" scheme may sound queer, but it makes
3043  * sense from a cache coherency perspective.  If only the host writes
3044  * to the buffer post rings, and only the chip writes to the rx status
3045  * rings, then cache lines never move beyond shared-modified state.
3046  * If both the host and chip were to write into the same ring, cache line
3047  * eviction could occur since both entities want it in an exclusive state.
3048  */
3049 static int tg3_rx(struct tg3 *tp, int budget)
3050 {
3051         u32 work_mask;
3052         u32 sw_idx = tp->rx_rcb_ptr;
3053         u16 hw_idx;
3054         int received;
3055
3056         hw_idx = tp->hw_status->idx[0].rx_producer;
3057         /*
3058          * We need to order the read of hw_idx and the read of
3059          * the opaque cookie.
3060          */
3061         rmb();
3062         work_mask = 0;
3063         received = 0;
3064         while (sw_idx != hw_idx && budget > 0) {
3065                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3066                 unsigned int len;
3067                 struct sk_buff *skb;
3068                 dma_addr_t dma_addr;
3069                 u32 opaque_key, desc_idx, *post_ptr;
3070
3071                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3072                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3073                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3074                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3075                                                   mapping);
3076                         skb = tp->rx_std_buffers[desc_idx].skb;
3077                         post_ptr = &tp->rx_std_ptr;
3078                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3079                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3080                                                   mapping);
3081                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3082                         post_ptr = &tp->rx_jumbo_ptr;
3083                 }
3084                 else {
3085                         goto next_pkt_nopost;
3086                 }
3087
3088                 work_mask |= opaque_key;
3089
3090                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3091                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3092                 drop_it:
3093                         tg3_recycle_rx(tp, opaque_key,
3094                                        desc_idx, *post_ptr);
3095                 drop_it_no_recycle:
3096                         /* Other statistics are tracked by the card. */
3097                         tp->net_stats.rx_dropped++;
3098                         goto next_pkt;
3099                 }
3100
3101                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3102
3103                 if (len > RX_COPY_THRESHOLD &&
3104                     tp->rx_offset == 2) {
3105                         /* rx_offset != 2 iff this is a 5701 card running
3106                          * in PCI-X mode [see tg3_get_invariants()].
3107                          */
3108                         int skb_size;
3109
3110                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3111                                                     desc_idx, *post_ptr);
3112                         if (skb_size < 0)
3113                                 goto drop_it;
3114
3115                         pci_unmap_single(tp->pdev, dma_addr,
3116                                          skb_size - tp->rx_offset,
3117                                          PCI_DMA_FROMDEVICE);
3118
3119                         skb_put(skb, len);
3120                 } else {
3121                         struct sk_buff *copy_skb;
3122
3123                         tg3_recycle_rx(tp, opaque_key,
3124                                        desc_idx, *post_ptr);
3125
3126                         copy_skb = dev_alloc_skb(len + 2);
3127                         if (copy_skb == NULL)
3128                                 goto drop_it_no_recycle;
3129
3130                         copy_skb->dev = tp->dev;
3131                         skb_reserve(copy_skb, 2);
3132                         skb_put(copy_skb, len);
3133                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3134                         memcpy(copy_skb->data, skb->data, len);
3135                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3136
3137                         /* We'll reuse the original ring buffer. */
3138                         skb = copy_skb;
3139                 }
3140
3141                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3142                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3143                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3144                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3145                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3146                 else
3147                         skb->ip_summed = CHECKSUM_NONE;
3148
3149                 skb->protocol = eth_type_trans(skb, tp->dev);
3150 #if TG3_VLAN_TAG_USED
3151                 if (tp->vlgrp != NULL &&
3152                     desc->type_flags & RXD_FLAG_VLAN) {
3153                         tg3_vlan_rx(tp, skb,
3154                                     desc->err_vlan & RXD_VLAN_MASK);
3155                 } else
3156 #endif
3157                         netif_receive_skb(skb);
3158
3159                 tp->dev->last_rx = jiffies;
3160                 received++;
3161                 budget--;
3162
3163 next_pkt:
3164                 (*post_ptr)++;
3165 next_pkt_nopost:
3166                 sw_idx++;
3167                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3168
3169                 /* Refresh hw_idx to see if there is new work */
3170                 if (sw_idx == hw_idx) {
3171                         hw_idx = tp->hw_status->idx[0].rx_producer;
3172                         rmb();
3173                 }
3174         }
3175
3176         /* ACK the status ring. */
3177         tp->rx_rcb_ptr = sw_idx;
3178         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3179
3180         /* Refill RX ring(s). */
3181         if (work_mask & RXD_OPAQUE_RING_STD) {
3182                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3183                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3184                              sw_idx);
3185         }
3186         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3187                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3188                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3189                              sw_idx);
3190         }
3191         mmiowb();
3192
3193         return received;
3194 }
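
/* For illustration of the copy-vs-flip decision above: a frame no larger
 * than RX_COPY_THRESHOLD is memcpy'd into a freshly allocated SKB and the
 * original ring buffer is recycled in place, while a larger frame has its
 * buffer handed straight up the stack and a replacement buffer is
 * allocated for the ring.  If that replacement allocation fails, the
 * packet is dropped and the old buffer is recycled instead, so the ring
 * never loses entries.
 */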
3195
3196 static int tg3_poll(struct net_device *netdev, int *budget)
3197 {
3198         struct tg3 *tp = netdev_priv(netdev);
3199         struct tg3_hw_status *sblk = tp->hw_status;
3200         int done;
3201
3202         /* handle link change and other phy events */
3203         if (!(tp->tg3_flags &
3204               (TG3_FLAG_USE_LINKCHG_REG |
3205                TG3_FLAG_POLL_SERDES))) {
3206                 if (sblk->status & SD_STATUS_LINK_CHG) {
3207                         sblk->status = SD_STATUS_UPDATED |
3208                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3209                         spin_lock(&tp->lock);
3210                         tg3_setup_phy(tp, 0);
3211                         spin_unlock(&tp->lock);
3212                 }
3213         }
3214
3215         /* run TX completion thread */
3216         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3217                 tg3_tx(tp);
3218         }
3219
3220         /* run RX thread, within the bounds set by NAPI.
3221          * All RX "locking" is done by ensuring outside
3222          * code synchronizes with dev->poll()
3223          */
3224         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3225                 int orig_budget = *budget;
3226                 int work_done;
3227
3228                 if (orig_budget > netdev->quota)
3229                         orig_budget = netdev->quota;
3230
3231                 work_done = tg3_rx(tp, orig_budget);
3232
3233                 *budget -= work_done;
3234                 netdev->quota -= work_done;
3235         }
3236
3237         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3238                 tp->last_tag = sblk->status_tag;
3239                 rmb();
3240         } else
3241                 sblk->status &= ~SD_STATUS_UPDATED;
3242
3243         /* if no more work, tell net stack and NIC we're done */
3244         done = !tg3_has_work(tp);
3245         if (done) {
3246                 netif_rx_complete(netdev);
3247                 tg3_restart_ints(tp);
3248         }
3249
3250         return (done ? 0 : 1);
3251 }
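
/* For illustration of the NAPI accounting above: with *budget == 64 and
 * netdev->quota == 16, tg3_rx() is bounded to 16 packets; if it returns
 * having processed 10, *budget drops to 54 and netdev->quota to 6.  Only
 * when tg3_has_work() reports nothing pending does the poll routine call
 * netif_rx_complete() and re-enable chip interrupts via
 * tg3_restart_ints().
 */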
3252
3253 static void tg3_irq_quiesce(struct tg3 *tp)
3254 {
3255         BUG_ON(tp->irq_sync);
3256
3257         tp->irq_sync = 1;
3258         smp_mb();
3259
3260         synchronize_irq(tp->pdev->irq);
3261 }
3262
3263 static inline int tg3_irq_sync(struct tg3 *tp)
3264 {
3265         return tp->irq_sync;
3266 }
3267
3268 /* Fully shut down all tg3 driver activity elsewhere in the system.
3269  * If irq_sync is non-zero, the IRQ handler must also be synchronized
3270  * with.  Most of the time this is only necessary when shutting down
3271  * the device.
3272  */
3273 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3274 {
3275         if (irq_sync)
3276                 tg3_irq_quiesce(tp);
3277         spin_lock_bh(&tp->lock);
3278         spin_lock(&tp->tx_lock);
3279 }
3280
3281 static inline void tg3_full_unlock(struct tg3 *tp)
3282 {
3283         spin_unlock(&tp->tx_lock);
3284         spin_unlock_bh(&tp->lock);
3285 }
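
/* Typical usage of the two helpers above from a path that does not race
 * with the interrupt handler:
 *
 *	tg3_full_lock(tp, 0);
 *	... reconfigure driver/chip state ...
 *	tg3_full_unlock(tp);
 *
 * Paths that tear the device down pass a non-zero irq_sync so the IRQ
 * handler is quiesced first.  Note the fixed ordering: tp->lock is always
 * taken before tp->tx_lock and released in the reverse order.
 */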
3286
3287 /* MSI ISR - No need to check for interrupt sharing and no need to
3288  * flush status block and interrupt mailbox. PCI ordering rules
3289  * guarantee that MSI will arrive after the status block.
3290  */
3291 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3292 {
3293         struct net_device *dev = dev_id;
3294         struct tg3 *tp = netdev_priv(dev);
3295
3296         prefetch(tp->hw_status);
3297         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3298         /*
3299          * Writing any value to intr-mbox-0 clears PCI INTA# and
3300          * chip-internal interrupt pending events.
3301          * Writing non-zero to intr-mbox-0 additionally tells the
3302          * NIC to stop sending us irqs, engaging "in-intr-handler"
3303          * event coalescing.
3304          */
3305         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3306         if (likely(!tg3_irq_sync(tp)))
3307                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3308
3309         return IRQ_RETVAL(1);
3310 }
3311
3312 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3313 {
3314         struct net_device *dev = dev_id;
3315         struct tg3 *tp = netdev_priv(dev);
3316         struct tg3_hw_status *sblk = tp->hw_status;
3317         unsigned int handled = 1;
3318
3319         /* In INTx mode, it is possible for the interrupt to arrive at
3320          * the CPU before the status block write that preceded it has landed.
3321          * Reading the PCI State register will confirm whether the
3322          * interrupt is ours and will flush the status block.
3323          */
3324         if ((sblk->status & SD_STATUS_UPDATED) ||
3325             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3326                 /*
3327                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3328                  * chip-internal interrupt pending events.
3329          * Writing non-zero to intr-mbox-0 additionally tells the
3330                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3331                  * event coalescing.
3332                  */
3333                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3334                              0x00000001);
3335                 if (tg3_irq_sync(tp))
3336                         goto out;
3337                 sblk->status &= ~SD_STATUS_UPDATED;
3338                 if (likely(tg3_has_work(tp))) {
3339                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3340                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3341                 } else {
3342                         /* No work, shared interrupt perhaps?  re-enable
3343                          * interrupts, and flush that PCI write
3344                          */
3345                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3346                                 0x00000000);
3347                 }
3348         } else {        /* shared interrupt */
3349                 handled = 0;
3350         }
3351 out:
3352         return IRQ_RETVAL(handled);
3353 }
3354
3355 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3356 {
3357         struct net_device *dev = dev_id;
3358         struct tg3 *tp = netdev_priv(dev);
3359         struct tg3_hw_status *sblk = tp->hw_status;
3360         unsigned int handled = 1;
3361
3362         /* In INTx mode, it is possible for the interrupt to arrive at
3363          * the CPU before the status block write that preceded it has landed.
3364          * Reading the PCI State register will confirm whether the
3365          * interrupt is ours and will flush the status block.
3366          */
3367         if ((sblk->status_tag != tp->last_tag) ||
3368             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3369                 /*
3370          * Writing any value to intr-mbox-0 clears PCI INTA# and
3371          * chip-internal interrupt pending events.
3372          * Writing non-zero to intr-mbox-0 additionally tells the
3373                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3374                  * event coalescing.
3375                  */
3376                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3377                              0x00000001);
3378                 if (tg3_irq_sync(tp))
3379                         goto out;
3380                 if (netif_rx_schedule_prep(dev)) {
3381                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3382                         /* Update last_tag to mark that this status has been
3383                          * seen. Because interrupt may be shared, we may be
3384                          * racing with tg3_poll(), so only update last_tag
3385                          * if tg3_poll() is not scheduled.
3386                          */
3387                         tp->last_tag = sblk->status_tag;
3388                         __netif_rx_schedule(dev);
3389                 }
3390         } else {        /* shared interrupt */
3391                 handled = 0;
3392         }
3393 out:
3394         return IRQ_RETVAL(handled);
3395 }
3396
3397 /* ISR for interrupt test */
3398 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3399                 struct pt_regs *regs)
3400 {
3401         struct net_device *dev = dev_id;
3402         struct tg3 *tp = netdev_priv(dev);
3403         struct tg3_hw_status *sblk = tp->hw_status;
3404
3405         if ((sblk->status & SD_STATUS_UPDATED) ||
3406             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3407                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3408                              0x00000001);
3409                 return IRQ_RETVAL(1);
3410         }
3411         return IRQ_RETVAL(0);
3412 }
3413
3414 static int tg3_init_hw(struct tg3 *);
3415 static int tg3_halt(struct tg3 *, int, int);
3416
3417 #ifdef CONFIG_NET_POLL_CONTROLLER
3418 static void tg3_poll_controller(struct net_device *dev)
3419 {
3420         struct tg3 *tp = netdev_priv(dev);
3421
3422         tg3_interrupt(tp->pdev->irq, dev, NULL);
3423 }
3424 #endif
3425
3426 static void tg3_reset_task(void *_data)
3427 {
3428         struct tg3 *tp = _data;
3429         unsigned int restart_timer;
3430
3431         tg3_netif_stop(tp);
3432
3433         tg3_full_lock(tp, 1);
3434
3435         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3436         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3437
3438         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3439         tg3_init_hw(tp);
3440
3441         tg3_netif_start(tp);
3442
3443         tg3_full_unlock(tp);
3444
3445         if (restart_timer)
3446                 mod_timer(&tp->timer, jiffies + 1);
3447 }
3448
3449 static void tg3_tx_timeout(struct net_device *dev)
3450 {
3451         struct tg3 *tp = netdev_priv(dev);
3452
3453         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3454                dev->name);
3455
3456         schedule_work(&tp->reset_task);
3457 }
3458
3459 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3460 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3461 {
3462         u32 base = (u32) mapping & 0xffffffff;
3463
3464         return ((base > 0xffffdcc0) &&
3465                 (base + len + 8 < base));
3466 }
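
/* Worked example: a mapping at 0xffffffe0 with len == 100 gives
 * base + len + 8 == 0x10000004c, which truncates to 0x4c in 32 bits and is
 * therefore smaller than base, so the test fires and the packet is routed
 * through tigon3_4gb_hwbug_workaround() below.  A mapping at 0x7fffffe0
 * with the same length stays within its 4GB window and passes untouched.
 */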
3467
3468 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3469
3470 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3471                                        u32 last_plus_one, u32 *start,
3472                                        u32 base_flags, u32 mss)
3473 {
3474         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3475         dma_addr_t new_addr = 0;
3476         u32 entry = *start;
3477         int i, ret = 0;
3478
3479         if (!new_skb) {
3480                 ret = -1;
3481         } else {
3482                 /* New SKB is guaranteed to be linear. */
3483                 entry = *start;
3484                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3485                                           PCI_DMA_TODEVICE);
3486                 /* Make sure new skb does not cross any 4G boundaries.
3487                  * Drop the packet if it does.
3488                  */
3489                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3490                         ret = -1;
3491                         dev_kfree_skb(new_skb);
3492                         new_skb = NULL;
3493                 } else {
3494                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3495                                     base_flags, 1 | (mss << 1));
3496                         *start = NEXT_TX(entry);
3497                 }
3498         }
3499
3500         /* Now clean up the sw ring entries. */
3501         i = 0;
3502         while (entry != last_plus_one) {
3503                 int len;
3504
3505                 if (i == 0)
3506                         len = skb_headlen(skb);
3507                 else
3508                         len = skb_shinfo(skb)->frags[i-1].size;
3509                 pci_unmap_single(tp->pdev,
3510                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3511                                  len, PCI_DMA_TODEVICE);
3512                 if (i == 0) {
3513                         tp->tx_buffers[entry].skb = new_skb;
3514                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3515                 } else {
3516                         tp->tx_buffers[entry].skb = NULL;
3517                 }
3518                 entry = NEXT_TX(entry);
3519                 i++;
3520         }
3521
3522         dev_kfree_skb(skb);
3523
3524         return ret;
3525 }
3526
3527 static void tg3_set_txd(struct tg3 *tp, int entry,
3528                         dma_addr_t mapping, int len, u32 flags,
3529                         u32 mss_and_is_end)
3530 {
3531         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3532         int is_end = (mss_and_is_end & 0x1);
3533         u32 mss = (mss_and_is_end >> 1);
3534         u32 vlan_tag = 0;
3535
3536         if (is_end)
3537                 flags |= TXD_FLAG_END;
3538         if (flags & TXD_FLAG_VLAN) {
3539                 vlan_tag = flags >> 16;
3540                 flags &= 0xffff;
3541         }
3542         vlan_tag |= (mss << TXD_MSS_SHIFT);
3543
3544         txd->addr_hi = ((u64) mapping >> 32);
3545         txd->addr_lo = ((u64) mapping & 0xffffffff);
3546         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3547         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3548 }
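
/* For illustration of the mss_and_is_end packing used by the callers in
 * tg3_start_xmit(): the low bit flags the final descriptor of a packet and
 * the remaining bits carry the MSS, so the last fragment of a TSO frame
 * with mss == 1460 is passed (1460 << 1) | 1 == 2921, which unpacks here
 * to is_end == 1 and mss == 1460.
 */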
3549
3550 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3551 {
3552         struct tg3 *tp = netdev_priv(dev);
3553         dma_addr_t mapping;
3554         u32 len, entry, base_flags, mss;
3555         int would_hit_hwbug;
3556
3557         len = skb_headlen(skb);
3558
3559         /* No BH disabling for tx_lock here.  We are running in BH disabled
3560          * context and TX reclaim runs via tp->poll inside of a software
3561          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3562          * no IRQ context deadlocks to worry about either.  Rejoice!
3563          */
3564         if (!spin_trylock(&tp->tx_lock))
3565                 return NETDEV_TX_LOCKED; 
3566
3567         /* This is a hard error, log it. */
3568         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3569                 netif_stop_queue(dev);
3570                 spin_unlock(&tp->tx_lock);
3571                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3572                        dev->name);
3573                 return NETDEV_TX_BUSY;
3574         }
3575
3576         entry = tp->tx_prod;
3577         base_flags = 0;
3578         if (skb->ip_summed == CHECKSUM_HW)
3579                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3580 #if TG3_TSO_SUPPORT != 0
3581         mss = 0;
3582         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3583             (mss = skb_shinfo(skb)->tso_size) != 0) {
3584                 int tcp_opt_len, ip_tcp_len;
3585
3586                 if (skb_header_cloned(skb) &&
3587                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3588                         dev_kfree_skb(skb);
3589                         goto out_unlock;
3590                 }
3591
3592                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3593                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3594
3595                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3596                                TXD_FLAG_CPU_POST_DMA);
3597
3598                 skb->nh.iph->check = 0;
3599                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3600                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3601                         skb->h.th->check = 0;
3602                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3603                 }
3604                 else {
3605                         skb->h.th->check =
3606                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3607                                                    skb->nh.iph->daddr,
3608                                                    0, IPPROTO_TCP, 0);
3609                 }
3610
3611                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3612                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3613                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3614                                 int tsflags;
3615
3616                                 tsflags = ((skb->nh.iph->ihl - 5) +
3617                                            (tcp_opt_len >> 2));
3618                                 mss |= (tsflags << 11);
3619                         }
3620                 } else {
3621                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3622                                 int tsflags;
3623
3624                                 tsflags = ((skb->nh.iph->ihl - 5) +
3625                                            (tcp_opt_len >> 2));
3626                                 base_flags |= tsflags << 12;
3627                         }
3628                 }
3629         }
3630 #else
3631         mss = 0;
3632 #endif
3633 #if TG3_VLAN_TAG_USED
3634         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3635                 base_flags |= (TXD_FLAG_VLAN |
3636                                (vlan_tx_tag_get(skb) << 16));
3637 #endif
3638
3639         /* Queue skb data, a.k.a. the main skb fragment. */
3640         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3641
3642         tp->tx_buffers[entry].skb = skb;
3643         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3644
3645         would_hit_hwbug = 0;
3646
3647         if (tg3_4g_overflow_test(mapping, len))
3648                 would_hit_hwbug = 1;
3649
3650         tg3_set_txd(tp, entry, mapping, len, base_flags,
3651                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3652
3653         entry = NEXT_TX(entry);
3654
3655         /* Now loop through additional data fragments, and queue them. */
3656         if (skb_shinfo(skb)->nr_frags > 0) {
3657                 unsigned int i, last;
3658
3659                 last = skb_shinfo(skb)->nr_frags - 1;
3660                 for (i = 0; i <= last; i++) {
3661                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3662
3663                         len = frag->size;
3664                         mapping = pci_map_page(tp->pdev,
3665                                                frag->page,
3666                                                frag->page_offset,
3667                                                len, PCI_DMA_TODEVICE);
3668
3669                         tp->tx_buffers[entry].skb = NULL;
3670                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3671
3672                         if (tg3_4g_overflow_test(mapping, len))
3673                                 would_hit_hwbug = 1;
3674
3675                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3676                                 tg3_set_txd(tp, entry, mapping, len,
3677                                             base_flags, (i == last)|(mss << 1));
3678                         else
3679                                 tg3_set_txd(tp, entry, mapping, len,
3680                                             base_flags, (i == last));
3681
3682                         entry = NEXT_TX(entry);
3683                 }
3684         }
3685
3686         if (would_hit_hwbug) {
3687                 u32 last_plus_one = entry;
3688                 u32 start;
3689
3690                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3691                 start &= (TG3_TX_RING_SIZE - 1);
3692
3693                 /* If the workaround fails due to memory/mapping
3694                  * failure, silently drop this packet.
3695                  */
3696                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3697                                                 &start, base_flags, mss))
3698                         goto out_unlock;
3699
3700                 entry = start;
3701         }
3702
3703         /* Packets are ready, update Tx producer idx, locally and on the card. */
3704         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3705
3706         tp->tx_prod = entry;
3707         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3708                 netif_stop_queue(dev);
3709                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3710                         netif_wake_queue(tp->dev);
3711         }
3712
3713 out_unlock:
3714         mmiowb();
3715         spin_unlock(&tp->tx_lock);
3716
3717         dev->trans_start = jiffies;
3718
3719         return NETDEV_TX_OK;
3720 }
3721
3722 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3723                                int new_mtu)
3724 {
3725         dev->mtu = new_mtu;
3726
3727         if (new_mtu > ETH_DATA_LEN) {
3728                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3729                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3730                         ethtool_op_set_tso(dev, 0);
3731                 }
3732                 else
3733                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3734         } else {
3735                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3736                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3737                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3738         }
3739 }
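
/* For illustration: raising a non-5780-class board to an MTU of 9000
 * simply turns on TG3_FLAG_JUMBO_RING_ENABLE, whereas a 5780-class part
 * instead keeps the standard ring (its buffers are sized up to
 * RX_JUMBO_PKT_BUF_SZ in tg3_init_rings()) and gives up TSO.  Dropping
 * back to 1500 or below restores TSO capability and clears the jumbo
 * ring flag.
 */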
3740
3741 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3742 {
3743         struct tg3 *tp = netdev_priv(dev);
3744
3745         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3746                 return -EINVAL;
3747
3748         if (!netif_running(dev)) {
3749                 /* We'll just catch it later when the
3750                  * device is brought up.
3751                  */
3752                 tg3_set_mtu(dev, tp, new_mtu);
3753                 return 0;
3754         }
3755
3756         tg3_netif_stop(tp);
3757
3758         tg3_full_lock(tp, 1);
3759
3760         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3761
3762         tg3_set_mtu(dev, tp, new_mtu);
3763
3764         tg3_init_hw(tp);
3765
3766         tg3_netif_start(tp);
3767
3768         tg3_full_unlock(tp);
3769
3770         return 0;
3771 }
3772
3773 /* Free up pending packets in all rx/tx rings.
3774  *
3775  * The chip has been shut down and the driver detached from
3776  * the networking core, so no interrupts or new tx packets will
3777  * end up in the driver.  tp->{tx,}lock is not held and we are not
3778  * in an interrupt context and thus may sleep.
3779  */
3780 static void tg3_free_rings(struct tg3 *tp)
3781 {
3782         struct ring_info *rxp;
3783         int i;
3784
3785         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3786                 rxp = &tp->rx_std_buffers[i];
3787
3788                 if (rxp->skb == NULL)
3789                         continue;
3790                 pci_unmap_single(tp->pdev,
3791                                  pci_unmap_addr(rxp, mapping),
3792                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3793                                  PCI_DMA_FROMDEVICE);
3794                 dev_kfree_skb_any(rxp->skb);
3795                 rxp->skb = NULL;
3796         }
3797
3798         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3799                 rxp = &tp->rx_jumbo_buffers[i];
3800
3801                 if (rxp->skb == NULL)
3802                         continue;
3803                 pci_unmap_single(tp->pdev,
3804                                  pci_unmap_addr(rxp, mapping),
3805                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3806                                  PCI_DMA_FROMDEVICE);
3807                 dev_kfree_skb_any(rxp->skb);
3808                 rxp->skb = NULL;
3809         }
3810
3811         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3812                 struct tx_ring_info *txp;
3813                 struct sk_buff *skb;
3814                 int j;
3815
3816                 txp = &tp->tx_buffers[i];
3817                 skb = txp->skb;
3818
3819                 if (skb == NULL) {
3820                         i++;
3821                         continue;
3822                 }
3823
3824                 pci_unmap_single(tp->pdev,
3825                                  pci_unmap_addr(txp, mapping),
3826                                  skb_headlen(skb),
3827                                  PCI_DMA_TODEVICE);
3828                 txp->skb = NULL;
3829
3830                 i++;
3831
3832                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3833                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3834                         pci_unmap_page(tp->pdev,
3835                                        pci_unmap_addr(txp, mapping),
3836                                        skb_shinfo(skb)->frags[j].size,
3837                                        PCI_DMA_TODEVICE);
3838                         i++;
3839                 }
3840
3841                 dev_kfree_skb_any(skb);
3842         }
3843 }
3844
3845 /* Initialize tx/rx rings for packet processing.
3846  *
3847  * The chip has been shut down and the driver detached from
3848  * the networking core, so no interrupts or new tx packets will
3849  * end up in the driver.  tp->{tx,}lock are held and thus
3850  * we may not sleep.
3851  */
3852 static void tg3_init_rings(struct tg3 *tp)
3853 {
3854         u32 i;
3855
3856         /* Free up all the SKBs. */
3857         tg3_free_rings(tp);
3858
3859         /* Zero out all descriptors. */
3860         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3861         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3862         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3863         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3864
3865         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3866         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3867             (tp->dev->mtu > ETH_DATA_LEN))
3868                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3869
3870         /* Initialize invariants of the rings; we only set this
3871          * stuff once.  This works because the card does not
3872          * write into the rx buffer posting rings.
3873          */
3874         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3875                 struct tg3_rx_buffer_desc *rxd;
3876
3877                 rxd = &tp->rx_std[i];
3878                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3879                         << RXD_LEN_SHIFT;
3880                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3881                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3882                                (i << RXD_OPAQUE_INDEX_SHIFT));
3883         }
3884
3885         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3886                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3887                         struct tg3_rx_buffer_desc *rxd;
3888
3889                         rxd = &tp->rx_jumbo[i];
3890                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3891                                 << RXD_LEN_SHIFT;
3892                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3893                                 RXD_FLAG_JUMBO;
3894                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3895                                (i << RXD_OPAQUE_INDEX_SHIFT));
3896                 }
3897         }
3898
3899         /* Now allocate fresh SKBs for each rx ring. */
3900         for (i = 0; i < tp->rx_pending; i++) {
3901                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3902                                      -1, i) < 0)
3903                         break;
3904         }
3905
3906         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3907                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3908                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3909                                              -1, i) < 0)
3910                                 break;
3911                 }
3912         }
3913 }
3914
3915 /*
3916  * Must not be invoked with interrupt sources disabled and
3917  * the hardware shut down.
3918  */
3919 static void tg3_free_consistent(struct tg3 *tp)
3920 {
3921         if (tp->rx_std_buffers) {
3922                 kfree(tp->rx_std_buffers);
3923                 tp->rx_std_buffers = NULL;
3924         }
3925         if (tp->rx_std) {
3926                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3927                                     tp->rx_std, tp->rx_std_mapping);
3928                 tp->rx_std = NULL;
3929         }
3930         if (tp->rx_jumbo) {
3931                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3932                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3933                 tp->rx_jumbo = NULL;
3934         }
3935         if (tp->rx_rcb) {
3936                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3937                                     tp->rx_rcb, tp->rx_rcb_mapping);
3938                 tp->rx_rcb = NULL;
3939         }
3940         if (tp->tx_ring) {
3941                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3942                         tp->tx_ring, tp->tx_desc_mapping);
3943                 tp->tx_ring = NULL;
3944         }
3945         if (tp->hw_status) {
3946                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3947                                     tp->hw_status, tp->status_mapping);
3948                 tp->hw_status = NULL;
3949         }
3950         if (tp->hw_stats) {
3951                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3952                                     tp->hw_stats, tp->stats_mapping);
3953                 tp->hw_stats = NULL;
3954         }
3955 }
3956
3957 /*
3958  * Must not be invoked with interrupt sources disabled and
3959  * the hardware shut down.  Can sleep.
3960  */
3961 static int tg3_alloc_consistent(struct tg3 *tp)
3962 {
3963         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3964                                       (TG3_RX_RING_SIZE +
3965                                        TG3_RX_JUMBO_RING_SIZE)) +
3966                                      (sizeof(struct tx_ring_info) *
3967                                       TG3_TX_RING_SIZE),
3968                                      GFP_KERNEL);
3969         if (!tp->rx_std_buffers)
3970                 return -ENOMEM;
3971
3972         memset(tp->rx_std_buffers, 0,
3973                (sizeof(struct ring_info) *
3974                 (TG3_RX_RING_SIZE +
3975                  TG3_RX_JUMBO_RING_SIZE)) +
3976                (sizeof(struct tx_ring_info) *
3977                 TG3_TX_RING_SIZE));
3978
3979         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3980         tp->tx_buffers = (struct tx_ring_info *)
3981                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3982
3983         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3984                                           &tp->rx_std_mapping);
3985         if (!tp->rx_std)
3986                 goto err_out;
3987
3988         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3989                                             &tp->rx_jumbo_mapping);
3990
3991         if (!tp->rx_jumbo)
3992                 goto err_out;
3993
3994         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3995                                           &tp->rx_rcb_mapping);
3996         if (!tp->rx_rcb)
3997                 goto err_out;
3998
3999         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4000                                            &tp->tx_desc_mapping);
4001         if (!tp->tx_ring)
4002                 goto err_out;
4003
4004         tp->hw_status = pci_alloc_consistent(tp->pdev,
4005                                              TG3_HW_STATUS_SIZE,
4006                                              &tp->status_mapping);
4007         if (!tp->hw_status)
4008                 goto err_out;
4009
4010         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4011                                             sizeof(struct tg3_hw_stats),
4012                                             &tp->stats_mapping);
4013         if (!tp->hw_stats)
4014                 goto err_out;
4015
4016         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4017         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4018
4019         return 0;
4020
4021 err_out:
4022         tg3_free_consistent(tp);
4023         return -ENOMEM;
4024 }
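
/* For illustration, the single kmalloc() above is carved up by pointer
 * arithmetic into three consecutive arrays:
 *
 *	rx_std_buffers   : TG3_RX_RING_SIZE       x struct ring_info
 *	rx_jumbo_buffers : TG3_RX_JUMBO_RING_SIZE x struct ring_info
 *	tx_buffers       : TG3_TX_RING_SIZE       x struct tx_ring_info
 *
 * while the descriptor rings, the status block and the statistics block
 * each get their own pci_alloc_consistent() area so the chip can DMA to
 * and from them directly.
 */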
4025
4026 #define MAX_WAIT_CNT 1000
4027
4028 /* To stop a block, clear the enable bit and poll till it
4029  * clears.  tp->lock is held.
4030  */
4031 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4032 {
4033         unsigned int i;
4034         u32 val;
4035
4036         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4037                 switch (ofs) {
4038                 case RCVLSC_MODE:
4039                 case DMAC_MODE:
4040                 case MBFREE_MODE:
4041                 case BUFMGR_MODE:
4042                 case MEMARB_MODE:
4043                         /* We can't enable/disable these bits of the
4044                          * 5705/5750, just say success.
4045                          */
4046                         return 0;
4047
4048                 default:
4049                         break;
4050                 }
4051         }
4052
4053         val = tr32(ofs);
4054         val &= ~enable_bit;
4055         tw32_f(ofs, val);
4056
4057         for (i = 0; i < MAX_WAIT_CNT; i++) {
4058                 udelay(100);
4059                 val = tr32(ofs);
4060                 if ((val & enable_bit) == 0)
4061                         break;
4062         }
4063
4064         if (i == MAX_WAIT_CNT && !silent) {
4065                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4066                        "ofs=%lx enable_bit=%x\n",
4067                        ofs, enable_bit);
4068                 return -ENODEV;
4069         }
4070
4071         return 0;
4072 }
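
/* For illustration: with MAX_WAIT_CNT == 1000 and a 100 usec delay per
 * iteration, a block gets roughly 100 msec to acknowledge the cleared
 * enable bit before tg3_stop_block() gives up with -ENODEV (quietly, if
 * the caller asked for silence).
 */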
4073
4074 /* tp->lock is held. */
4075 static int tg3_abort_hw(struct tg3 *tp, int silent)
4076 {
4077         int i, err;
4078
4079         tg3_disable_ints(tp);
4080
4081         tp->rx_mode &= ~RX_MODE_ENABLE;
4082         tw32_f(MAC_RX_MODE, tp->rx_mode);
4083         udelay(10);
4084
4085         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4086         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4087         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4088         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4089         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4090         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4091
4092         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4093         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4094         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4095         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4096         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4097         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4098         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4099
4100         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4101         tw32_f(MAC_MODE, tp->mac_mode);
4102         udelay(40);
4103
4104         tp->tx_mode &= ~TX_MODE_ENABLE;
4105         tw32_f(MAC_TX_MODE, tp->tx_mode);
4106
4107         for (i = 0; i < MAX_WAIT_CNT; i++) {
4108                 udelay(100);
4109                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4110                         break;
4111         }
4112         if (i >= MAX_WAIT_CNT) {
4113                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4114                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4115                        tp->dev->name, tr32(MAC_TX_MODE));
4116                 err |= -ENODEV;
4117         }
4118
4119         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4120         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4121         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4122
4123         tw32(FTQ_RESET, 0xffffffff);
4124         tw32(FTQ_RESET, 0x00000000);
4125
4126         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4127         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4128
4129         if (tp->hw_status)
4130                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4131         if (tp->hw_stats)
4132                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4133
4134         return err;
4135 }
4136
4137 /* tp->lock is held. */
4138 static int tg3_nvram_lock(struct tg3 *tp)
4139 {
4140         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4141                 int i;
4142
4143                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4144                 for (i = 0; i < 8000; i++) {
4145                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4146                                 break;
4147                         udelay(20);
4148                 }
4149                 if (i == 8000)
4150                         return -ENODEV;
4151         }
4152         return 0;
4153 }
4154
4155 /* tp->lock is held. */
4156 static void tg3_nvram_unlock(struct tg3 *tp)
4157 {
4158         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4159                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4160 }
4161
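/* Typical pairing of the two helpers above (an illustrative sketch; see
 * tg3_load_firmware_cpu() later in this file for a real caller):
 *
 *      tg3_nvram_lock(tp);
 *      err = tg3_halt_cpu(tp, cpu_base);
 *      tg3_nvram_unlock(tp);
 *
 * The request/grant handshake through NVRAM_SWARB keeps the driver and
 * the on-chip bootcode from driving the NVRAM interface at the same time.
 */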
4162 /* tp->lock is held. */
4163 static void tg3_enable_nvram_access(struct tg3 *tp)
4164 {
4165         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4166             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4167                 u32 nvaccess = tr32(NVRAM_ACCESS);
4168
4169                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4170         }
4171 }
4172
4173 /* tp->lock is held. */
4174 static void tg3_disable_nvram_access(struct tg3 *tp)
4175 {
4176         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4177             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4178                 u32 nvaccess = tr32(NVRAM_ACCESS);
4179
4180                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4181         }
4182 }
4183
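/* On 5750+ parts without protected NVRAM, the two helpers above toggle
 * ACCESS_ENABLE in NVRAM_ACCESS; on all other chips both calls are
 * no-ops.  Elsewhere in the driver, NVRAM data accesses are expected to
 * be bracketed by tg3_enable_nvram_access()/tg3_disable_nvram_access().
 */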
4184 /* tp->lock is held. */
4185 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4186 {
4187         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4188                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4189                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4190
4191         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4192                 switch (kind) {
4193                 case RESET_KIND_INIT:
4194                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4195                                       DRV_STATE_START);
4196                         break;
4197
4198                 case RESET_KIND_SHUTDOWN:
4199                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4200                                       DRV_STATE_UNLOAD);
4201                         break;
4202
4203                 case RESET_KIND_SUSPEND:
4204                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4205                                       DRV_STATE_SUSPEND);
4206                         break;
4207
4208                 default:
4209                         break;
4210                 }
4211         }
4212 }
4213
4214 /* tp->lock is held. */
4215 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4216 {
4217         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4218                 switch (kind) {
4219                 case RESET_KIND_INIT:
4220                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4221                                       DRV_STATE_START_DONE);
4222                         break;
4223
4224                 case RESET_KIND_SHUTDOWN:
4225                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4226                                       DRV_STATE_UNLOAD_DONE);
4227                         break;
4228
4229                 default:
4230                         break;
4231                 }
4232         }
4233 }
4234
4235 /* tp->lock is held. */
4236 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4237 {
4238         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4239                 switch (kind) {
4240                 case RESET_KIND_INIT:
4241                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4242                                       DRV_STATE_START);
4243                         break;
4244
4245                 case RESET_KIND_SHUTDOWN:
4246                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4247                                       DRV_STATE_UNLOAD);
4248                         break;
4249
4250                 case RESET_KIND_SUSPEND:
4251                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4252                                       DRV_STATE_SUSPEND);
4253                         break;
4254
4255                 default:
4256                         break;
4257                 }
4258         }
4259 }
4260
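/* The three signature writers above implement the driver<->management
 * firmware handshake around a reset: tg3_halt() below calls
 * tg3_write_sig_pre_reset() before tearing the hardware down, then
 * tg3_write_sig_legacy() and tg3_write_sig_post_reset() once
 * tg3_chip_reset() has finished, so ASF firmware can track the driver's
 * RESET_KIND_* state via NIC_SRAM_FW_DRV_STATE_MBOX.
 */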
4261 static void tg3_stop_fw(struct tg3 *);
4262
4263 /* tp->lock is held. */
4264 static int tg3_chip_reset(struct tg3 *tp)
4265 {
4266         u32 val;
4267         void (*write_op)(struct tg3 *, u32, u32);
4268         int i;
4269
4270         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4271                 tg3_nvram_lock(tp);
4272
4273         /*
4274          * We must avoid the readl() that normally takes place.
4275          * It locks machines, causes machine checks, and does other
4276          * fun things.  So temporarily disable the 5701 hardware
4277          * workaround while we do the reset.
4278          */
4279         write_op = tp->write32;
4280         if (write_op == tg3_write_flush_reg32)
4281                 tp->write32 = tg3_write32;
4282
4283         /* do the reset */
4284         val = GRC_MISC_CFG_CORECLK_RESET;
4285
4286         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4287                 if (tr32(0x7e2c) == 0x60) {
4288                         tw32(0x7e2c, 0x20);
4289                 }
4290                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4291                         tw32(GRC_MISC_CFG, (1 << 29));
4292                         val |= (1 << 29);
4293                 }
4294         }
4295
4296         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4297                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4298         tw32(GRC_MISC_CFG, val);
4299
4300         /* restore 5701 hardware bug workaround write method */
4301         tp->write32 = write_op;
4302
4303         /* Unfortunately, we have to delay before the PCI read back.
4304          * Some 575X chips will not even respond to a PCI cfg access
4305          * when the reset command is given to the chip.
4306          *
4307          * How do these hardware designers expect things to work
4308          * properly if the PCI write is posted for a long period
4309          * of time?  It is always necessary to have some method by
4310          * which a register read back can occur to push the write
4311          * out which does the reset.
4312          *
4313          * For most tg3 variants the trick below has worked.
4314          * Ho hum...
4315          */
4316         udelay(120);
4317
4318         /* Flush PCI posted writes.  The normal MMIO registers
4319          * are inaccessible at this time so this is the only
4320          * way to do this reliably (actually, this is no longer
4321          * the case, see above).  I tried to use indirect
4322          * register read/write but this upset some 5701 variants.
4323          */
4324         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4325
4326         udelay(120);
4327
4328         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4329                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4330                         int i;
4331                         u32 cfg_val;
4332
4333                         /* Wait for link training to complete.  */
4334                         for (i = 0; i < 5000; i++)
4335                                 udelay(100);
4336
4337                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4338                         pci_write_config_dword(tp->pdev, 0xc4,
4339                                                cfg_val | (1 << 15));
4340                 }
4341                 /* Set PCIE max payload size and clear error status.  */
4342                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4343         }
4344
4345         /* Re-enable indirect register accesses. */
4346         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4347                                tp->misc_host_ctrl);
4348
4349         /* Set MAX PCI retry to zero. */
4350         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4351         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4352             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4353                 val |= PCISTATE_RETRY_SAME_DMA;
4354         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4355
4356         pci_restore_state(tp->pdev);
4357
4358         /* Make sure PCI-X relaxed ordering bit is clear. */
4359         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4360         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4361         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4362
4363         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4364                 u32 val;
4365
4366                 /* Chip reset on 5780 will reset the MSI enable bit,
4367                  * so we need to restore it.
4368                  */
4369                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4370                         u16 ctrl;
4371
4372                         pci_read_config_word(tp->pdev,
4373                                              tp->msi_cap + PCI_MSI_FLAGS,
4374                                              &ctrl);
4375                         pci_write_config_word(tp->pdev,
4376                                               tp->msi_cap + PCI_MSI_FLAGS,
4377                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4378                         val = tr32(MSGINT_MODE);
4379                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4380                 }
4381
4382                 val = tr32(MEMARB_MODE);
4383                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4384
4385         } else
4386                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4387
4388         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4389                 tg3_stop_fw(tp);
4390                 tw32(0x5000, 0x400);
4391         }
4392
4393         tw32(GRC_MODE, tp->grc_mode);
4394
4395         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4396                 u32 val = tr32(0xc4);
4397
4398                 tw32(0xc4, val | (1 << 15));
4399         }
4400
4401         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4403                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4404                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4405                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4406                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4407         }
4408
4409         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4410                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4411                 tw32_f(MAC_MODE, tp->mac_mode);
4412         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4413                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4414                 tw32_f(MAC_MODE, tp->mac_mode);
4415         } else
4416                 tw32_f(MAC_MODE, 0);
4417         udelay(40);
4418
4419         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4420                 /* Wait for firmware initialization to complete. */
4421                 for (i = 0; i < 100000; i++) {
4422                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4423                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4424                                 break;
4425                         udelay(10);
4426                 }
4427                 if (i >= 100000) {
4428                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4429                                "firmware will not restart magic=%08x\n",
4430                                tp->dev->name, val);
4431                         return -ENODEV;
4432                 }
4433         }
4434
4435         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4436             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4437                 u32 val = tr32(0x7c00);
4438
4439                 tw32(0x7c00, val | (1 << 25));
4440         }
4441
4442         /* Reprobe ASF enable state.  */
4443         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4444         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4445         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4446         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4447                 u32 nic_cfg;
4448
4449                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4450                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4451                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4452                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4453                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4454                 }
4455         }
4456
4457         return 0;
4458 }
4459
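/* tg3_stop_fw() below asks the ASF firmware running on the RX CPU to
 * pause: it writes FWCMD_NICDRV_PAUSE_FW into the firmware command
 * mailbox, raises bit 14 of GRC_RX_CPU_EVENT to notify the CPU, and then
 * polls briefly for that bit to clear as the acknowledgement.
 */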
4460 /* tp->lock is held. */
4461 static void tg3_stop_fw(struct tg3 *tp)
4462 {
4463         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4464                 u32 val;
4465                 int i;
4466
4467                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4468                 val = tr32(GRC_RX_CPU_EVENT);
4469                 val |= (1 << 14);
4470                 tw32(GRC_RX_CPU_EVENT, val);
4471
4472                 /* Wait for RX cpu to ACK the event.  */
4473                 for (i = 0; i < 100; i++) {
4474                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4475                                 break;
4476                         udelay(1);
4477                 }
4478         }
4479 }
4480
4481 /* tp->lock is held. */
4482 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4483 {
4484         int err;
4485
4486         tg3_stop_fw(tp);
4487
4488         tg3_write_sig_pre_reset(tp, kind);
4489
4490         tg3_abort_hw(tp, silent);
4491         err = tg3_chip_reset(tp);
4492
4493         tg3_write_sig_legacy(tp, kind);
4494         tg3_write_sig_post_reset(tp, kind);
4495
4496         if (err)
4497                 return err;
4498
4499         return 0;
4500 }
4501
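/* Illustrative call (a sketch only; the actual call sites are outside
 * this excerpt): with tp->lock held, a caller shutting the device down
 * would use something like
 *
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *
 * picking a RESET_KIND_* value for the signature writes and setting
 * silent to suppress tg3_stop_block() timeout warnings.
 */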
4502 #define TG3_FW_RELEASE_MAJOR    0x0
4503 #define TG3_FW_RELEASE_MINOR    0x0
4504 #define TG3_FW_RELEASE_FIX      0x0
4505 #define TG3_FW_START_ADDR       0x08000000
4506 #define TG3_FW_TEXT_ADDR        0x08000000
4507 #define TG3_FW_TEXT_LEN         0x9c0
4508 #define TG3_FW_RODATA_ADDR      0x080009c0
4509 #define TG3_FW_RODATA_LEN       0x60
4510 #define TG3_FW_DATA_ADDR        0x08000a40
4511 #define TG3_FW_DATA_LEN         0x20
4512 #define TG3_FW_SBSS_ADDR        0x08000a60
4513 #define TG3_FW_SBSS_LEN         0xc
4514 #define TG3_FW_BSS_ADDR         0x08000a70
4515 #define TG3_FW_BSS_LEN          0x10
4516
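/* The tables below hold the 5701 A0 workaround firmware image.  The
 * TG3_FW_*_ADDR/LEN values describe its text/rodata/data segments as
 * linked at 0x08000000; tg3_load_firmware_cpu() masks each address with
 * 0xffff to place the segment at the right offset inside the CPU scratch
 * memory window.
 */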
4517 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4518         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4519         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4520         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4521         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4522         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4523         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4524         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4525         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4526         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4527         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4528         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4529         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4530         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4531         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4532         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4533         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4534         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4535         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4536         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4537         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4538         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4539         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4540         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4541         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4542         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4543         0, 0, 0, 0, 0, 0,
4544         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4545         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4546         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4547         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4548         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4549         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4550         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4551         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4552         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4553         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4554         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4555         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4556         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4557         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4558         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4559         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4560         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4561         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4562         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4563         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4564         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4565         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4566         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4567         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4568         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4569         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4570         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4571         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4572         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4573         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4574         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4575         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4576         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4577         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4578         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4579         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4580         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4581         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4582         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4583         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4584         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4585         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4586         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4587         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4588         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4589         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4590         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4591         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4592         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4593         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4594         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4595         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4596         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4597         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4598         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4599         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4600         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4601         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4602         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4603         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4604         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4605         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4606         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4607         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4608         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4609 };
4610
4611 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4612         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4613         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4614         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4615         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4616         0x00000000
4617 };
4618
4619 #if 0 /* All zeros, don't eat up space with it. */
4620 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4621         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4622         0x00000000, 0x00000000, 0x00000000, 0x00000000
4623 };
4624 #endif
4625
4626 #define RX_CPU_SCRATCH_BASE     0x30000
4627 #define RX_CPU_SCRATCH_SIZE     0x04000
4628 #define TX_CPU_SCRATCH_BASE     0x34000
4629 #define TX_CPU_SCRATCH_SIZE     0x04000
4630
4631 /* tp->lock is held. */
4632 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4633 {
4634         int i;
4635
4636         if (offset == TX_CPU_BASE &&
4637             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4638                 BUG();
4639
4640         if (offset == RX_CPU_BASE) {
4641                 for (i = 0; i < 10000; i++) {
4642                         tw32(offset + CPU_STATE, 0xffffffff);
4643                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4644                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4645                                 break;
4646                 }
4647
4648                 tw32(offset + CPU_STATE, 0xffffffff);
4649                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4650                 udelay(10);
4651         } else {
4652                 for (i = 0; i < 10000; i++) {
4653                         tw32(offset + CPU_STATE, 0xffffffff);
4654                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4655                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4656                                 break;
4657                 }
4658         }
4659
4660         if (i >= 10000) {
4661                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s "
4662                        "trying to halt the %s CPU\n",
4663                        tp->dev->name,
4664                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4665                 return -ENODEV;
4666         }
4667         return 0;
4668 }
4669
4670 struct fw_info {
4671         unsigned int text_base;
4672         unsigned int text_len;
4673         u32 *text_data;
4674         unsigned int rodata_base;
4675         unsigned int rodata_len;
4676         u32 *rodata_data;
4677         unsigned int data_base;
4678         unsigned int data_len;
4679         u32 *data_data;
4680 };
4681
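/* Filling in a struct fw_info and handing it to tg3_load_firmware_cpu()
 * looks like this (a condensed sketch of what
 * tg3_load_5701_a0_firmware_fix() below does for the RX CPU):
 *
 *      struct fw_info info = {
 *              .text_base   = TG3_FW_TEXT_ADDR,
 *              .text_len    = TG3_FW_TEXT_LEN,
 *              .text_data   = &tg3FwText[0],
 *              .rodata_base = TG3_FW_RODATA_ADDR,
 *              .rodata_len  = TG3_FW_RODATA_LEN,
 *              .rodata_data = &tg3FwRodata[0],
 *              .data_base   = TG3_FW_DATA_ADDR,
 *              .data_len    = TG3_FW_DATA_LEN,
 *              .data_data   = NULL,    (tg3FwData is all zeros)
 *      };
 *      err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, RX_CPU_SCRATCH_BASE,
 *                                  RX_CPU_SCRATCH_SIZE, &info);
 *
 * A NULL *_data pointer makes tg3_load_firmware_cpu() write zeros for
 * that segment.
 */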
4682 /* tp->lock is held. */
4683 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4684                                  int cpu_scratch_size, struct fw_info *info)
4685 {
4686         int err, i;
4687         void (*write_op)(struct tg3 *, u32, u32);
4688
4689         if (cpu_base == TX_CPU_BASE &&
4690             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4691                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4692                        "TX cpu firmware on %s, which is a 5705.\n",
4693                        tp->dev->name);
4694                 return -EINVAL;
4695         }
4696
4697         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4698                 write_op = tg3_write_mem;
4699         else
4700                 write_op = tg3_write_indirect_reg32;
4701
4702         /* It is possible that bootcode is still loading at this point.
4703          * Acquire the nvram lock before halting the cpu.
4704          */
4705         tg3_nvram_lock(tp);
4706         err = tg3_halt_cpu(tp, cpu_base);
4707         tg3_nvram_unlock(tp);
4708         if (err)
4709                 goto out;
4710
4711         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4712                 write_op(tp, cpu_scratch_base + i, 0);
4713         tw32(cpu_base + CPU_STATE, 0xffffffff);
4714         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4715         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4716                 write_op(tp, (cpu_scratch_base +
4717                               (info->text_base & 0xffff) +
4718                               (i * sizeof(u32))),
4719                          (info->text_data ?
4720                           info->text_data[i] : 0));
4721         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4722                 write_op(tp, (cpu_scratch_base +
4723                               (info->rodata_base & 0xffff) +
4724                               (i * sizeof(u32))),
4725                          (info->rodata_data ?
4726                           info->rodata_data[i] : 0));
4727         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4728                 write_op(tp, (cpu_scratch_base +
4729                               (info->data_base & 0xffff) +
4730                               (i * sizeof(u32))),
4731                          (info->data_data ?
4732                           info->data_data[i] : 0));
4733
4734         err = 0;
4735
4736 out:
4737         return err;
4738 }
4739
4740 /* tp->lock is held. */
4741 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4742 {
4743         struct fw_info info;
4744         int err, i;
4745
4746         info.text_base = TG3_FW_TEXT_ADDR;
4747         info.text_len = TG3_FW_TEXT_LEN;
4748         info.text_data = &tg3FwText[0];
4749         info.rodata_base = TG3_FW_RODATA_ADDR;
4750         info.rodata_len = TG3_FW_RODATA_LEN;
4751         info.rodata_data = &tg3FwRodata[0];
4752         info.data_base = TG3_FW_DATA_ADDR;
4753         info.data_len = TG3_FW_DATA_LEN;
4754         info.data_data = NULL;
4755
4756         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4757                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4758                                     &info);
4759         if (err)
4760                 return err;
4761
4762         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4763                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4764                                     &info);
4765         if (err)
4766                 return err;
4767
4768         /* Now start up only the RX cpu. */
4769         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4770         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4771
4772         for (i = 0; i < 5; i++) {
4773                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4774                         break;
4775                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4776                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4777                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4778                 udelay(1000);
4779         }
4780         if (i >= 5) {
4781                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4782                        "to set RX CPU PC: is %08x, should be %08x\n",
4783                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4784                        TG3_FW_TEXT_ADDR);
4785                 return -ENODEV;
4786         }
4787         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4788         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4789
4790         return 0;
4791 }
4792
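/* Starting the freshly loaded image is a separate step from loading it:
 * tg3_load_5701_a0_firmware_fix() above writes TG3_FW_TEXT_ADDR into the
 * RX CPU program counter, re-halts and rewrites it a few times if the
 * value does not stick, and only then clears CPU_MODE to release the RX
 * CPU from halt.  Only the RX CPU is started here; the TX CPU is left
 * halted.
 */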
4793 #if TG3_TSO_SUPPORT != 0
4794
4795 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4796 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4797 #define TG3_TSO_FW_RELEASE_FIX          0x0
4798 #define TG3_TSO_FW_START_ADDR           0x08000000
4799 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4800 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4801 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4802 #define TG3_TSO_FW_RODATA_LEN           0x60
4803 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4804 #define TG3_TSO_FW_DATA_LEN             0x30
4805 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4806 #define TG3_TSO_FW_SBSS_LEN             0x2c
4807 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4808 #define TG3_TSO_FW_BSS_LEN              0x894
4809
4810 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4811         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4812         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4813         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4814         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4815         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4816         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4817         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4818         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4819         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4820         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4821         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4822         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4823         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4824         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4825         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4826         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4827         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4828         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4829         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4830         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4831         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4832         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4833         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4834         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4835         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4836         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4837         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4838         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4839         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4840         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4841         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4842         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4843         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4844         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4845         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4846         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4847         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4848         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4849         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4850         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4851         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4852         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4853         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4854         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4855         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4856         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4857         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4858         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4859         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4860         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4861         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4862         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4863         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4864         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4865         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4866         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4867         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4868         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4869         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4870         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4871         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4872         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4873         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4874         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4875         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4876         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4877         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4878         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4879         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4880         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4881         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4882         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4883         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4884         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4885         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4886         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4887         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4888         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4889         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4890         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4891         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4892         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4893         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4894         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4895         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4896         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4897         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4898         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4899         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4900         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4901         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4902         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4903         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4904         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4905         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4906         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4907         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4908         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4909         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4910         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4911         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4912         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4913         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4914         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4915         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4916         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4917         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4918         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4919         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4920         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4921         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4922         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4923         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4924         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4925         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4926         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4927         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4928         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4929         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4930         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4931         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4932         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4933         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4934         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4935         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4936         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4937         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4938         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4939         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4940         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4941         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4942         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4943         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4944         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4945         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4946         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4947         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4948         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4949         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4950         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4951         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4952         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4953         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4954         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4955         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4956         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4957         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4958         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4959         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4960         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4961         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4962         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4963         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4964         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4965         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4966         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4967         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4968         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4969         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4970         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4971         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4972         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4973         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4974         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4975         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4976         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4977         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4978         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4979         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4980         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4981         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4982         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4983         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4984         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4985         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4986         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4987         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4988         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4989         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4990         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4991         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4992         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4993         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4994         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4995         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4996         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4997         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4998         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4999         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5000         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5001         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5002         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5003         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5004         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5005         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5006         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5007         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5008         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5009         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5010         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5011         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5012         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5013         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5014         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5015         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5016         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5017         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5018         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5019         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5020         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5021         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5022         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5023         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5024         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5025         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5026         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5027         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5028         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5029         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5030         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5031         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5032         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5033         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5034         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5035         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5036         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5037         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5038         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5039         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5040         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5041         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5042         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5043         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5044         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5045         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5046         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5047         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5048         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5049         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5050         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5051         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5052         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5053         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5054         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5055         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5056         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5057         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5058         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5059         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5060         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5061         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5062         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5063         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5064         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5065         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5066         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5067         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5068         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5069         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5070         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5071         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5072         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5073         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5074         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5075         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5076         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5077         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5078         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5079         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5080         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5081         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5082         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5083         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5084         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5085         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5086         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5087         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5088         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5089         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5090         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5091         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5092         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5093         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5094         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5095 };
5096
5097 static u32 tg3TsoFwRodata[] = {
5098         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5099         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5100         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5101         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5102         0x00000000,
5103 };
5104
5105 static u32 tg3TsoFwData[] = {
5106         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5107         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5108         0x00000000,
5109 };
5110
5111 /* 5705 needs a special version of the TSO firmware.  */
5112 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5113 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5114 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5115 #define TG3_TSO5_FW_START_ADDR          0x00010000
5116 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5117 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5118 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5119 #define TG3_TSO5_FW_RODATA_LEN          0x50
5120 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5121 #define TG3_TSO5_FW_DATA_LEN            0x20
5122 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5123 #define TG3_TSO5_FW_SBSS_LEN            0x28
5124 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5125 #define TG3_TSO5_FW_BSS_LEN             0x88
5126
5127 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5128         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5129         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5130         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5131         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5132         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5133         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5134         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5135         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5136         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5137         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5138         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5139         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5140         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5141         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5142         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5143         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5144         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5145         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5146         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5147         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5148         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5149         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5150         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5151         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5152         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5153         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5154         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5155         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5156         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5157         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5158         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5159         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5160         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5161         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5162         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5163         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5164         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5165         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5166         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5167         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5168         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5169         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5170         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5171         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5172         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5173         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5174         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5175         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5176         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5177         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5178         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5179         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5180         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5181         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5182         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5183         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5184         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5185         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5186         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5187         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5188         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5189         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5190         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5191         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5192         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5193         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5194         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5195         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5196         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5197         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5198         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5199         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5200         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5201         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5202         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5203         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5204         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5205         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5206         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5207         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5208         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5209         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5210         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5211         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5212         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5213         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5214         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5215         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5216         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5217         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5218         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5219         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5220         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5221         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5222         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5223         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5224         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5225         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5226         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5227         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5228         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5229         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5230         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5231         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5232         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5233         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5234         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5235         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5236         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5237         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5238         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5239         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5240         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5241         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5242         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5243         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5244         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5245         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5246         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5247         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5248         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5249         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5250         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5251         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5252         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5253         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5254         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5255         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5256         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5257         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5258         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5259         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5260         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5261         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5262         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5263         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5264         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5265         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5266         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5267         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5268         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5269         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5270         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5271         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5272         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5273         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5274         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5275         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5276         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5277         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5278         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5279         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5280         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5281         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5282         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5283         0x00000000, 0x00000000, 0x00000000,
5284 };
5285
5286 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5287         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5288         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5289         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5290         0x00000000, 0x00000000, 0x00000000,
5291 };
5292
5293 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5294         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5295         0x00000000, 0x00000000, 0x00000000,
5296 };
5297
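/* Load the driver's TSO firmware into the NIC's on-chip CPU.  Chips with
 * hardware TSO need no firmware at all; the 5705 runs a dedicated image
 * on the RX CPU, everything else runs the standard image on the TX CPU.
 */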
5298 /* tp->lock is held. */
5299 static int tg3_load_tso_firmware(struct tg3 *tp)
5300 {
5301         struct fw_info info;
5302         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5303         int err, i;
5304
5305         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5306                 return 0;
5307
5308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5309                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5310                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5311                 info.text_data = &tg3Tso5FwText[0];
5312                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5313                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5314                 info.rodata_data = &tg3Tso5FwRodata[0];
5315                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5316                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5317                 info.data_data = &tg3Tso5FwData[0];
5318                 cpu_base = RX_CPU_BASE;
5319                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5320                 cpu_scratch_size = (info.text_len +
5321                                     info.rodata_len +
5322                                     info.data_len +
5323                                     TG3_TSO5_FW_SBSS_LEN +
5324                                     TG3_TSO5_FW_BSS_LEN);
5325         } else {
5326                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5327                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5328                 info.text_data = &tg3TsoFwText[0];
5329                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5330                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5331                 info.rodata_data = &tg3TsoFwRodata[0];
5332                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5333                 info.data_len = TG3_TSO_FW_DATA_LEN;
5334                 info.data_data = &tg3TsoFwData[0];
5335                 cpu_base = TX_CPU_BASE;
5336                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5337                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5338         }
5339
5340         err = tg3_load_firmware_cpu(tp, cpu_base,
5341                                     cpu_scratch_base, cpu_scratch_size,
5342                                     &info);
5343         if (err)
5344                 return err;
5345
5346         /* Now startup the cpu. */
5347         tw32(cpu_base + CPU_STATE, 0xffffffff);
5348         tw32_f(cpu_base + CPU_PC,    info.text_base);
5349
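        /* Verify that the new PC stuck; retry up to five times, halting
         * the CPU and rewriting the PC between attempts.
         */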
5350         for (i = 0; i < 5; i++) {
5351                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5352                         break;
5353                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5354                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5355                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5356                 udelay(1000);
5357         }
5358         if (i >= 5) {
5359                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5360                        "CPU PC for %s: PC is %08x, should be %08x\n",
5361                        tp->dev->name, tr32(cpu_base + CPU_PC),
5362                        info.text_base);
5363                 return -ENODEV;
5364         }
5365         tw32(cpu_base + CPU_STATE, 0xffffffff);
5366         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5367         return 0;
5368 }
5369
5370 #endif /* TG3_TSO_SUPPORT != 0 */
5371
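/* Program the station address into all four MAC address slots (plus the
 * twelve extended slots on 5703/5704), and seed the transmit backoff
 * generator with the byte sum of the address.
 */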
5372 /* tp->lock is held. */
5373 static void __tg3_set_mac_addr(struct tg3 *tp)
5374 {
5375         u32 addr_high, addr_low;
5376         int i;
5377
5378         addr_high = ((tp->dev->dev_addr[0] << 8) |
5379                      tp->dev->dev_addr[1]);
5380         addr_low = ((tp->dev->dev_addr[2] << 24) |
5381                     (tp->dev->dev_addr[3] << 16) |
5382                     (tp->dev->dev_addr[4] <<  8) |
5383                     (tp->dev->dev_addr[5] <<  0));
5384         for (i = 0; i < 4; i++) {
5385                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5386                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5387         }
5388
5389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5391                 for (i = 0; i < 12; i++) {
5392                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5393                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5394                 }
5395         }
5396
5397         addr_high = (tp->dev->dev_addr[0] +
5398                      tp->dev->dev_addr[1] +
5399                      tp->dev->dev_addr[2] +
5400                      tp->dev->dev_addr[3] +
5401                      tp->dev->dev_addr[4] +
5402                      tp->dev->dev_addr[5]) &
5403                 TX_BACKOFF_SEED_MASK;
5404         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5405 }
5406
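/* net_device ->set_mac_address hook: validate the new address, copy it
 * into dev->dev_addr, and reprogram the MAC under tp->lock.
 */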
5407 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5408 {
5409         struct tg3 *tp = netdev_priv(dev);
5410         struct sockaddr *addr = p;
5411
5412         if (!is_valid_ether_addr(addr->sa_data))
5413                 return -EINVAL;
5414
5415         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5416
5417         spin_lock_bh(&tp->lock);
5418         __tg3_set_mac_addr(tp);
5419         spin_unlock_bh(&tp->lock);
5420
5421         return 0;
5422 }
5423
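/* Fill in one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA address
 * of the ring, its maxlen/flags word and, on pre-5705 chips, the ring's
 * location in NIC memory.
 */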
5424 /* tp->lock is held. */
5425 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5426                            dma_addr_t mapping, u32 maxlen_flags,
5427                            u32 nic_addr)
5428 {
5429         tg3_write_mem(tp,
5430                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5431                       ((u64) mapping >> 32));
5432         tg3_write_mem(tp,
5433                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5434                       ((u64) mapping & 0xffffffff));
5435         tg3_write_mem(tp,
5436                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5437                        maxlen_flags);
5438
5439         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5440                 tg3_write_mem(tp,
5441                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5442                               nic_addr);
5443 }
5444
5445 static void __tg3_set_rx_mode(struct net_device *);
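/* Program the host coalescing engine from an ethtool_coalesce structure.
 * On pre-5705 chips the per-interrupt tick registers and the statistics
 * tick register are also programmed; the latter is forced to zero while
 * the link is down.
 */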
5446 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5447 {
5448         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5449         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5450         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5451         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5452         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5453                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5454                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5455         }
5456         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5457         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5458         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5459                 u32 val = ec->stats_block_coalesce_usecs;
5460
5461                 if (!netif_carrier_ok(tp->dev))
5462                         val = 0;
5463
5464                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5465         }
5466 }
5467
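/* Full hardware (re)initialization: stop any running firmware, reset the
 * chip, then reprogram the rings, buffer manager, DMA engines, MAC, host
 * coalescing and receive rules, and finally bring the link up through
 * tg3_setup_phy().
 */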
5468 /* tp->lock is held. */
5469 static int tg3_reset_hw(struct tg3 *tp)
5470 {
5471         u32 val, rdmac_mode;
5472         int i, err, limit;
5473
5474         tg3_disable_ints(tp);
5475
5476         tg3_stop_fw(tp);
5477
5478         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5479
5480         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5481                 tg3_abort_hw(tp, 1);
5482         }
5483
5484         err = tg3_chip_reset(tp);
5485         if (err)
5486                 return err;
5487
5488         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5489
5490         /* This works around an issue with Athlon chipsets on
5491          * B3 tigon3 silicon.  This bit has no effect on any
5492          * other revision.  But do not set this on PCI Express
5493          * chips.
5494          */
5495         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5496                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5497         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5498
5499         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5500             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5501                 val = tr32(TG3PCI_PCISTATE);
5502                 val |= PCISTATE_RETRY_SAME_DMA;
5503                 tw32(TG3PCI_PCISTATE, val);
5504         }
5505
5506         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5507                 /* Enable some hw fixes.  */
5508                 val = tr32(TG3PCI_MSI_DATA);
5509                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5510                 tw32(TG3PCI_MSI_DATA, val);
5511         }
5512
5513         /* Descriptor ring init may make accesses to the
5514          * NIC SRAM area to setup the TX descriptors, so we
5515          * can only do this after the hardware has been
5516          * successfully reset.
5517          */
5518         tg3_init_rings(tp);
5519
5520         /* This value is determined during the probe time DMA
5521          * engine test, tg3_test_dma.
5522          */
5523         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5524
5525         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5526                           GRC_MODE_4X_NIC_SEND_RINGS |
5527                           GRC_MODE_NO_TX_PHDR_CSUM |
5528                           GRC_MODE_NO_RX_PHDR_CSUM);
5529         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5530         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5531                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5532         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5533                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5534
5535         tw32(GRC_MODE,
5536              tp->grc_mode |
5537              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5538
5539         /* Set up the timer prescalar register.  Clock is always 66 MHz. */
5540         val = tr32(GRC_MISC_CFG);
5541         val &= ~0xff;
5542         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5543         tw32(GRC_MISC_CFG, val);
5544
5545         /* Initialize MBUF/DESC pool. */
5546         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5547                 /* Do nothing.  */
5548         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5549                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5550                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5551                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5552                 else
5553                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5554                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5555                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5556         }
5557 #if TG3_TSO_SUPPORT != 0
5558         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5559                 int fw_len;
5560
5561                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5562                           TG3_TSO5_FW_RODATA_LEN +
5563                           TG3_TSO5_FW_DATA_LEN +
5564                           TG3_TSO5_FW_SBSS_LEN +
5565                           TG3_TSO5_FW_BSS_LEN);
5566                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
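                /* The TSO firmware image sits at the bottom of the 5705
                 * MBUF pool, so shift the pool base up past it (the image
                 * size is rounded to a 128-byte boundary above).
                 */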
5567                 tw32(BUFMGR_MB_POOL_ADDR,
5568                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5569                 tw32(BUFMGR_MB_POOL_SIZE,
5570                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5571         }
5572 #endif
5573
5574         if (tp->dev->mtu <= ETH_DATA_LEN) {
5575                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5576                      tp->bufmgr_config.mbuf_read_dma_low_water);
5577                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5578                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5579                 tw32(BUFMGR_MB_HIGH_WATER,
5580                      tp->bufmgr_config.mbuf_high_water);
5581         } else {
5582                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5583                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5584                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5585                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5586                 tw32(BUFMGR_MB_HIGH_WATER,
5587                      tp->bufmgr_config.mbuf_high_water_jumbo);
5588         }
5589         tw32(BUFMGR_DMA_LOW_WATER,
5590              tp->bufmgr_config.dma_low_water);
5591         tw32(BUFMGR_DMA_HIGH_WATER,
5592              tp->bufmgr_config.dma_high_water);
5593
5594         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5595         for (i = 0; i < 2000; i++) {
5596                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5597                         break;
5598                 udelay(10);
5599         }
5600         if (i >= 2000) {
5601                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5602                        tp->dev->name);
5603                 return -ENODEV;
5604         }
5605
5606         /* Setup replenish threshold. */
5607         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5608
5609         /* Initialize TG3_BDINFO's at:
5610          *  RCVDBDI_STD_BD:     standard eth size rx ring
5611          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5612          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5613          *
5614          * like so:
5615          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5616          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5617          *                              ring attribute flags
5618          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5619          *
5620          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5621          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5622          *
5623          * The size of each ring is fixed in the firmware, but the location is
5624          * configurable.
5625          */
5626         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5627              ((u64) tp->rx_std_mapping >> 32));
5628         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5629              ((u64) tp->rx_std_mapping & 0xffffffff));
5630         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5631              NIC_SRAM_RX_BUFFER_DESC);
5632
5633         /* Don't even try to program the JUMBO/MINI buffer descriptor
5634          * configs on 5705.
5635          */
5636         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5637                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5638                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5639         } else {
5640                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5641                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5642
5643                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5644                      BDINFO_FLAGS_DISABLED);
5645
5646                 /* Setup replenish threshold. */
5647                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5648
5649                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5650                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5651                              ((u64) tp->rx_jumbo_mapping >> 32));
5652                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5653                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5654                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5655                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5656                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5657                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5658                 } else {
5659                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5660                              BDINFO_FLAGS_DISABLED);
5661                 }
5662
5663         }
5664
5665         /* There is only one send ring on 5705/5750, no need to explicitly
5666          * disable the others.
5667          */
5668         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5669                 /* Clear out send RCB ring in SRAM. */
5670                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5671                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5672                                       BDINFO_FLAGS_DISABLED);
5673         }
5674
5675         tp->tx_prod = 0;
5676         tp->tx_cons = 0;
5677         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5678         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5679
5680         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5681                        tp->tx_desc_mapping,
5682                        (TG3_TX_RING_SIZE <<
5683                         BDINFO_FLAGS_MAXLEN_SHIFT),
5684                        NIC_SRAM_TX_BUFFER_DESC);
5685
5686         /* There is only one receive return ring on 5705/5750, no need
5687          * to explicitly disable the others.
5688          */
5689         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5690                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5691                      i += TG3_BDINFO_SIZE) {
5692                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5693                                       BDINFO_FLAGS_DISABLED);
5694                 }
5695         }
5696
5697         tp->rx_rcb_ptr = 0;
5698         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5699
5700         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5701                        tp->rx_rcb_mapping,
5702                        (TG3_RX_RCB_RING_SIZE(tp) <<
5703                         BDINFO_FLAGS_MAXLEN_SHIFT),
5704                        0);
5705
5706         tp->rx_std_ptr = tp->rx_pending;
5707         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5708                      tp->rx_std_ptr);
5709
5710         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5711                                                 tp->rx_jumbo_pending : 0;
5712         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5713                      tp->rx_jumbo_ptr);
5714
5715         /* Initialize MAC address and backoff seed. */
5716         __tg3_set_mac_addr(tp);
5717
5718         /* MTU + ethernet header + FCS + optional VLAN tag */
5719         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5720
5721         /* The slot time is changed by tg3_setup_phy if we
5722          * run at gigabit with half duplex.
5723          */
5724         tw32(MAC_TX_LENGTHS,
5725              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5726              (6 << TX_LENGTHS_IPG_SHIFT) |
5727              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5728
5729         /* Receive rules. */
5730         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5731         tw32(RCVLPC_CONFIG, 0x0181);
5732
5733         /* Calculate RDMAC_MODE setting early, we need it to determine
5734          * the RCVLPC_STATE_ENABLE mask.
5735          */
5736         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5737                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5738                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5739                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5740                       RDMAC_MODE_LNGREAD_ENAB);
5741         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5742                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5743
5744         /* This if statement applies to 5705 and 5750 PCI devices only. */
5745         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5746              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5747             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5748                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5749                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5750                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5751                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5752                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5753                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5754                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5755                 }
5756         }
5757
5758         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5759                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5760
5761 #if TG3_TSO_SUPPORT != 0
5762         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5763                 rdmac_mode |= (1 << 27);
5764 #endif
5765
5766         /* Receive/send statistics. */
5767         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5768             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5769                 val = tr32(RCVLPC_STATS_ENABLE);
5770                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5771                 tw32(RCVLPC_STATS_ENABLE, val);
5772         } else {
5773                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5774         }
5775         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5776         tw32(SNDDATAI_STATSENAB, 0xffffff);
5777         tw32(SNDDATAI_STATSCTRL,
5778              (SNDDATAI_SCTRL_ENABLE |
5779               SNDDATAI_SCTRL_FASTUPD));
5780
5781         /* Setup host coalescing engine. */
5782         tw32(HOSTCC_MODE, 0);
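        /* Wait for the coalescing engine to report disabled before
         * reprogramming it.
         */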
5783         for (i = 0; i < 2000; i++) {
5784                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5785                         break;
5786                 udelay(10);
5787         }
5788
5789         __tg3_set_coalesce(tp, &tp->coal);
5790
5791         /* set status block DMA address */
5792         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5793              ((u64) tp->status_mapping >> 32));
5794         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5795              ((u64) tp->status_mapping & 0xffffffff));
5796
5797         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5798                 /* Status/statistics block address.  See tg3_timer,
5799                  * the tg3_periodic_fetch_stats call there, and
5800                  * tg3_get_stats to see how this works for 5705/5750 chips.
5801                  */
5802                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5803                      ((u64) tp->stats_mapping >> 32));
5804                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5805                      ((u64) tp->stats_mapping & 0xffffffff));
5806                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5807                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5808         }
5809
5810         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5811
5812         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5813         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5814         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5815                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5816
5817         /* Clear statistics/status block in chip, and status block in ram. */
5818         for (i = NIC_SRAM_STATS_BLK;
5819              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5820              i += sizeof(u32)) {
5821                 tg3_write_mem(tp, i, 0);
5822                 udelay(40);
5823         }
5824         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5825
5826         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5827                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5828                 /* reset to prevent losing 1st rx packet intermittently */
5829                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5830                 udelay(10);
5831         }
5832
5833         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5834                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5835         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5836         udelay(40);
5837
5838         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5839          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5840          * register to preserve the GPIO settings for LOMs. The GPIOs,
5841          * whether used as inputs or outputs, are set by boot code after
5842          * reset.
5843          */
5844         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5845                 u32 gpio_mask;
5846
5847                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5848                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5849
5850                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5851                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5852                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5853
5854                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5855
5856                 /* GPIO1 must be driven high for eeprom write protect */
5857                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5858                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5859         }
5860         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5861         udelay(100);
5862
5863         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5864         tp->last_tag = 0;
5865
5866         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5867                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5868                 udelay(40);
5869         }
5870
5871         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5872                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5873                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5874                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5875                WDMAC_MODE_LNGREAD_ENAB);
5876
5877         /* This if statement applies to 5705 and 5750 PCI devices only. */
5878         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5879              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5880             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5881                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5882                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5883                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5884                         /* nothing */
5885                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5886                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5887                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5888                         val |= WDMAC_MODE_RX_ACCEL;
5889                 }
5890         }
5891
5892         tw32_f(WDMAC_MODE, val);
5893         udelay(40);
5894
5895         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5896                 val = tr32(TG3PCI_X_CAPS);
5897                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5898                         val &= ~PCIX_CAPS_BURST_MASK;
5899                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5900                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5901                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5902                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5903                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5904                                 val |= (tp->split_mode_max_reqs <<
5905                                         PCIX_CAPS_SPLIT_SHIFT);
5906                 }
5907                 tw32(TG3PCI_X_CAPS, val);
5908         }
5909
5910         tw32_f(RDMAC_MODE, rdmac_mode);
5911         udelay(40);
5912
5913         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5914         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5915                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5916         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5917         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5918         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5919         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5920         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5921 #if TG3_TSO_SUPPORT != 0
5922         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5923                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5924 #endif
5925         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5926         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5927
5928         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5929                 err = tg3_load_5701_a0_firmware_fix(tp);
5930                 if (err)
5931                         return err;
5932         }
5933
5934 #if TG3_TSO_SUPPORT != 0
5935         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5936                 err = tg3_load_tso_firmware(tp);
5937                 if (err)
5938                         return err;
5939         }
5940 #endif
5941
5942         tp->tx_mode = TX_MODE_ENABLE;
5943         tw32_f(MAC_TX_MODE, tp->tx_mode);
5944         udelay(100);
5945
5946         tp->rx_mode = RX_MODE_ENABLE;
5947         tw32_f(MAC_RX_MODE, tp->rx_mode);
5948         udelay(10);
5949
5950         if (tp->link_config.phy_is_low_power) {
5951                 tp->link_config.phy_is_low_power = 0;
5952                 tp->link_config.speed = tp->link_config.orig_speed;
5953                 tp->link_config.duplex = tp->link_config.orig_duplex;
5954                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5955         }
5956
5957         tp->mi_mode = MAC_MI_MODE_BASE;
5958         tw32_f(MAC_MI_MODE, tp->mi_mode);
5959         udelay(80);
5960
5961         tw32(MAC_LED_CTRL, tp->led_ctrl);
5962
5963         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5964         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5965                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5966                 udelay(10);
5967         }
5968         tw32_f(MAC_RX_MODE, tp->rx_mode);
5969         udelay(10);
5970
5971         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5972                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5973                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5974                         /* Set drive transmission level to 1.2V, but only if
5975                          * the signal pre-emphasis bit is not set.  */
5976                         val = tr32(MAC_SERDES_CFG);
5977                         val &= 0xfffff000;
5978                         val |= 0x880;
5979                         tw32(MAC_SERDES_CFG, val);
5980                 }
5981                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5982                         tw32(MAC_SERDES_CFG, 0x616000);
5983         }
5984
5985         /* Prevent chip from dropping frames when flow control
5986          * is enabled.
5987          */
5988         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5989
5990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5991             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5992                 /* Use hardware link auto-negotiation */
5993                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5994         }
5995
5996         err = tg3_setup_phy(tp, 1);
5997         if (err)
5998                 return err;
5999
6000         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6001                 u32 tmp;
6002
6003                 /* Clear CRC stats. */
6004                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6005                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6006                         tg3_readphy(tp, 0x14, &tmp);
6007                 }
6008         }
6009
6010         __tg3_set_rx_mode(tp->dev);
6011
6012         /* Initialize receive rules. */
6013         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6014         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6015         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6016         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6017
6018         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6019             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6020                 limit = 8;
6021         else
6022                 limit = 16;
6023         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6024                 limit -= 4;
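        /* The cases below fall through on purpose, clearing every
         * rule/value pair from (limit - 1) down to 4.
         */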
6025         switch (limit) {
6026         case 16:
6027                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6028         case 15:
6029                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6030         case 14:
6031                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6032         case 13:
6033                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6034         case 12:
6035                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6036         case 11:
6037                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6038         case 10:
6039                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6040         case 9:
6041                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6042         case 8:
6043                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6044         case 7:
6045                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6046         case 6:
6047                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6048         case 5:
6049                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6050         case 4:
6051                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6052         case 3:
6053                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6054         case 2:
6055         case 1:
6056
6057         default:
6058                 break;
6059         }
6060
6061         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6062
6063         return 0;
6064 }
6065
6066 /* Called at device open time to get the chip ready for
6067  * packet processing.  Invoked with tp->lock held.
6068  */
6069 static int tg3_init_hw(struct tg3 *tp)
6070 {
6071         int err;
6072
6073         /* Force the chip into D0. */
6074         err = tg3_set_power_state(tp, 0);
6075         if (err)
6076                 goto out;
6077
6078         tg3_switch_clocks(tp);
6079
6080         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6081
6082         err = tg3_reset_hw(tp);
6083
6084 out:
6085         return err;
6086 }
6087
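/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) software
 * counter, carrying into the high word when the low word wraps.
 */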
6088 #define TG3_STAT_ADD32(PSTAT, REG) \
6089 do {    u32 __val = tr32(REG); \
6090         (PSTAT)->low += __val; \
6091         if ((PSTAT)->low < __val) \
6092                 (PSTAT)->high += 1; \
6093 } while (0)
6094
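/* Add the MAC TX/RX statistics registers into the 64-bit software
 * statistics block.  Skipped entirely while the link is down.
 */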
6095 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6096 {
6097         struct tg3_hw_stats *sp = tp->hw_stats;
6098
6099         if (!netif_carrier_ok(tp->dev))
6100                 return;
6101
6102         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6103         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6104         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6105         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6106         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6107         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6108         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6109         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6110         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6111         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6112         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6113         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6114         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6115
6116         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6117         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6118         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6119         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6120         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6121         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6122         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6123         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6124         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6125         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6126         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6127         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6128         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6129         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6130 }
6131
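/* Periodic housekeeping timer.  With non-tagged status it prods the chip
 * to avoid losing interrupts and schedules a reset if the write DMA
 * engine has stopped; once a second it polls the link and fetches
 * statistics; the ASF heartbeat is sent when its counter expires, and
 * the timer then re-arms itself.
 */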
6132 static void tg3_timer(unsigned long __opaque)
6133 {
6134         struct tg3 *tp = (struct tg3 *) __opaque;
6135
6136         spin_lock(&tp->lock);
6137
6138         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6139                 /* All of this garbage is needed because, when using
6140                  * non-tagged IRQ status, the mailbox/status_block protocol
6141                  * the chip uses with the cpu is race prone.
6142                  */
6143                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6144                         tw32(GRC_LOCAL_CTRL,
6145                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6146                 } else {
6147                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6148                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6149                 }
6150
6151                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6152                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6153                         spin_unlock(&tp->lock);
6154                         schedule_work(&tp->reset_task);
6155                         return;
6156                 }
6157         }
6158
6159         /* This part only runs once per second. */
6160         if (!--tp->timer_counter) {
6161                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6162                         tg3_periodic_fetch_stats(tp);
6163
6164                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6165                         u32 mac_stat;
6166                         int phy_event;
6167
6168                         mac_stat = tr32(MAC_STATUS);
6169
6170                         phy_event = 0;
6171                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6172                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6173                                         phy_event = 1;
6174                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6175                                 phy_event = 1;
6176
6177                         if (phy_event)
6178                                 tg3_setup_phy(tp, 0);
6179                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6180                         u32 mac_stat = tr32(MAC_STATUS);
6181                         int need_setup = 0;
6182
6183                         if (netif_carrier_ok(tp->dev) &&
6184                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6185                                 need_setup = 1;
6186                         }
6187                         if (!netif_carrier_ok(tp->dev) &&
6188                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6189                                          MAC_STATUS_SIGNAL_DET))) {
6190                                 need_setup = 1;
6191                         }
6192                         if (need_setup) {
6193                                 tw32_f(MAC_MODE,
6194                                      (tp->mac_mode &
6195                                       ~MAC_MODE_PORT_MODE_MASK));
6196                                 udelay(40);
6197                                 tw32_f(MAC_MODE, tp->mac_mode);
6198                                 udelay(40);
6199                                 tg3_setup_phy(tp, 0);
6200                         }
6201                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6202                         tg3_serdes_parallel_detect(tp);
6203
6204                 tp->timer_counter = tp->timer_multiplier;
6205         }
6206
6207         /* Heartbeat is only sent once every 2 seconds.  */
6208         if (!--tp->asf_counter) {
6209                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6210                         u32 val;
6211
6212                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6213                                            FWCMD_NICDRV_ALIVE2);
6214                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6215                         /* 5 second timeout */
6216                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6217                         val = tr32(GRC_RX_CPU_EVENT);
6218                         val |= (1 << 14);
6219                         tw32(GRC_RX_CPU_EVENT, val);
6220                 }
6221                 tp->asf_counter = tp->asf_multiplier;
6222         }
6223
6224         spin_unlock(&tp->lock);
6225
6226         tp->timer.expires = jiffies + tp->timer_offset;
6227         add_timer(&tp->timer);
6228 }
6229
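/* Verify that the device can actually deliver an interrupt: install a
 * test handler, force an immediate host-coalescing interrupt, and poll
 * the interrupt mailbox for up to ~50ms before restoring the normal
 * handler.
 */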
6230 static int tg3_test_interrupt(struct tg3 *tp)
6231 {
6232         struct net_device *dev = tp->dev;
6233         int err, i;
6234         u32 int_mbox = 0;
6235
6236         if (!netif_running(dev))
6237                 return -ENODEV;
6238
6239         tg3_disable_ints(tp);
6240
6241         free_irq(tp->pdev->irq, dev);
6242
6243         err = request_irq(tp->pdev->irq, tg3_test_isr,
6244                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6245         if (err)
6246                 return err;
6247
6248         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6249         tg3_enable_ints(tp);
6250
6251         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6252                HOSTCC_MODE_NOW);
6253
6254         for (i = 0; i < 5; i++) {
6255                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6256                                         TG3_64BIT_REG_LOW);
6257                 if (int_mbox != 0)
6258                         break;
6259                 msleep(10);
6260         }
6261
6262         tg3_disable_ints(tp);
6263
6264         free_irq(tp->pdev->irq, dev);
6265
6266         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6267                 err = request_irq(tp->pdev->irq, tg3_msi,
6268                                   SA_SAMPLE_RANDOM, dev->name, dev);
6269         else {
6270                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6271                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6272                         fn = tg3_interrupt_tagged;
6273                 err = request_irq(tp->pdev->irq, fn,
6274                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6275         }
6276
6277         if (err)
6278                 return err;
6279
6280         if (int_mbox != 0)
6281                 return 0;
6282
6283         return -EIO;
6284 }
6285
6286 /* Returns 0 if the MSI test succeeds, or if it fails but INTx
6287  * interrupt mode is successfully restored.
6288  */
6289 static int tg3_test_msi(struct tg3 *tp)
6290 {
6291         struct net_device *dev = tp->dev;
6292         int err;
6293         u16 pci_cmd;
6294
6295         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6296                 return 0;
6297
6298         /* Turn off SERR reporting in case MSI terminates with Master
6299          * Abort.
6300          */
6301         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6302         pci_write_config_word(tp->pdev, PCI_COMMAND,
6303                               pci_cmd & ~PCI_COMMAND_SERR);
6304
6305         err = tg3_test_interrupt(tp);
6306
6307         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6308
6309         if (!err)
6310                 return 0;
6311
6312         /* other failures */
6313         if (err != -EIO)
6314                 return err;
6315
6316         /* MSI test failed, go back to INTx mode */
6317         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6318                "switching to INTx mode. Please report this failure to "
6319                "the PCI maintainer and include system chipset information.\n",
6320                        tp->dev->name);
6321
6322         free_irq(tp->pdev->irq, dev);
6323         pci_disable_msi(tp->pdev);
6324
6325         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6326
6327         {
6328                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6329                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6330                         fn = tg3_interrupt_tagged;
6331
6332                 err = request_irq(tp->pdev->irq, fn,
6333                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6334         }
6335         if (err)
6336                 return err;
6337
6338         /* Need to reset the chip because the MSI cycle may have terminated
6339          * with Master Abort.
6340          */
6341         tg3_full_lock(tp, 1);
6342
6343         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6344         err = tg3_init_hw(tp);
6345
6346         tg3_full_unlock(tp);
6347
6348         if (err)
6349                 free_irq(tp->pdev->irq, dev);
6350
6351         return err;
6352 }
6353
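/* Bring-up sequence: allocate the DMA-consistent descriptor memory,
 * switch to MSI on 5750+ parts (other than the 5750 AX/BX revisions)
 * when tagged status is available, request the IRQ, program the
 * hardware, start the driver timer (once per second with tagged
 * status, ten times per second otherwise), verify MSI delivery via
 * tg3_test_msi(), then enable interrupts and start the TX queue.
 */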
6354 static int tg3_open(struct net_device *dev)
6355 {
6356         struct tg3 *tp = netdev_priv(dev);
6357         int err;
6358
6359         tg3_full_lock(tp, 0);
6360
6361         tg3_disable_ints(tp);
6362         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6363
6364         tg3_full_unlock(tp);
6365
6366         /* The placement of this call is tied
6367          * to the setup and use of Host TX descriptors.
6368          */
6369         err = tg3_alloc_consistent(tp);
6370         if (err)
6371                 return err;
6372
6373         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6374             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6375             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6376                 /* All MSI supporting chips should support tagged
6377                  * status.  Assert that this is the case.
6378                  */
6379                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6380                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6381                                "Not using MSI.\n", tp->dev->name);
6382                 } else if (pci_enable_msi(tp->pdev) == 0) {
6383                         u32 msi_mode;
6384
6385                         msi_mode = tr32(MSGINT_MODE);
6386                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6387                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6388                 }
6389         }
6390         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6391                 err = request_irq(tp->pdev->irq, tg3_msi,
6392                                   SA_SAMPLE_RANDOM, dev->name, dev);
6393         else {
6394                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6395                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6396                         fn = tg3_interrupt_tagged;
6397
6398                 err = request_irq(tp->pdev->irq, fn,
6399                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6400         }
6401
6402         if (err) {
6403                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6404                         pci_disable_msi(tp->pdev);
6405                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6406                 }
6407                 tg3_free_consistent(tp);
6408                 return err;
6409         }
6410
6411         tg3_full_lock(tp, 0);
6412
6413         err = tg3_init_hw(tp);
6414         if (err) {
6415                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6416                 tg3_free_rings(tp);
6417         } else {
6418                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6419                         tp->timer_offset = HZ;
6420                 else
6421                         tp->timer_offset = HZ / 10;
6422
6423                 BUG_ON(tp->timer_offset > HZ);
6424                 tp->timer_counter = tp->timer_multiplier =
6425                         (HZ / tp->timer_offset);
6426                 tp->asf_counter = tp->asf_multiplier =
6427                         ((HZ / tp->timer_offset) * 2);
6428
6429                 init_timer(&tp->timer);
6430                 tp->timer.expires = jiffies + tp->timer_offset;
6431                 tp->timer.data = (unsigned long) tp;
6432                 tp->timer.function = tg3_timer;
6433         }
6434
6435         tg3_full_unlock(tp);
6436
6437         if (err) {
6438                 free_irq(tp->pdev->irq, dev);
6439                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6440                         pci_disable_msi(tp->pdev);
6441                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6442                 }
6443                 tg3_free_consistent(tp);
6444                 return err;
6445         }
6446
6447         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6448                 err = tg3_test_msi(tp);
6449
6450                 if (err) {
6451                         tg3_full_lock(tp, 0);
6452
6453                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6454                                 pci_disable_msi(tp->pdev);
6455                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6456                         }
6457                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6458                         tg3_free_rings(tp);
6459                         tg3_free_consistent(tp);
6460
6461                         tg3_full_unlock(tp);
6462
6463                         return err;
6464                 }
6465         }
6466
6467         tg3_full_lock(tp, 0);
6468
6469         add_timer(&tp->timer);
6470         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6471         tg3_enable_ints(tp);
6472
6473         tg3_full_unlock(tp);
6474
6475         netif_start_queue(dev);
6476
6477         return 0;
6478 }
6479
6480 #if 0
6481 /*static*/ void tg3_dump_state(struct tg3 *tp)
6482 {
6483         u32 val32, val32_2, val32_3, val32_4, val32_5;
6484         u16 val16;
6485         int i;
6486
6487         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6488         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6489         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6490                val16, val32);
6491
6492         /* MAC block */
6493         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6494                tr32(MAC_MODE), tr32(MAC_STATUS));
6495         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6496                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6497         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6498                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6499         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6500                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6501
6502         /* Send data initiator control block */
6503         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6504                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6505         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6506                tr32(SNDDATAI_STATSCTRL));
6507
6508         /* Send data completion control block */
6509         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6510
6511         /* Send BD ring selector block */
6512         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6513                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6514
6515         /* Send BD initiator control block */
6516         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6517                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6518
6519         /* Send BD completion control block */
6520         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6521
6522         /* Receive list placement control block */
6523         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6524                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6525         printk("       RCVLPC_STATSCTRL[%08x]\n",
6526                tr32(RCVLPC_STATSCTRL));
6527
6528         /* Receive data and receive BD initiator control block */
6529         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6530                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6531
6532         /* Receive data completion control block */
6533         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6534                tr32(RCVDCC_MODE));
6535
6536         /* Receive BD initiator control block */
6537         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6538                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6539
6540         /* Receive BD completion control block */
6541         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6542                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6543
6544         /* Receive list selector control block */
6545         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6546                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6547
6548         /* Mbuf cluster free block */
6549         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6550                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6551
6552         /* Host coalescing control block */
6553         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6554                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6555         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6556                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6557                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6558         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6559                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6560                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6561         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6562                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6563         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6564                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6565
6566         /* Memory arbiter control block */
6567         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6568                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6569
6570         /* Buffer manager control block */
6571         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6572                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6573         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6574                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6575         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6576                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6577                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6578                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6579
6580         /* Read DMA control block */
6581         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6582                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6583
6584         /* Write DMA control block */
6585         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6586                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6587
6588         /* DMA completion block */
6589         printk("DEBUG: DMAC_MODE[%08x]\n",
6590                tr32(DMAC_MODE));
6591
6592         /* GRC block */
6593         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6594                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6595         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6596                tr32(GRC_LOCAL_CTRL));
6597
6598         /* TG3_BDINFOs */
6599         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6600                tr32(RCVDBDI_JUMBO_BD + 0x0),
6601                tr32(RCVDBDI_JUMBO_BD + 0x4),
6602                tr32(RCVDBDI_JUMBO_BD + 0x8),
6603                tr32(RCVDBDI_JUMBO_BD + 0xc));
6604         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6605                tr32(RCVDBDI_STD_BD + 0x0),
6606                tr32(RCVDBDI_STD_BD + 0x4),
6607                tr32(RCVDBDI_STD_BD + 0x8),
6608                tr32(RCVDBDI_STD_BD + 0xc));
6609         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6610                tr32(RCVDBDI_MINI_BD + 0x0),
6611                tr32(RCVDBDI_MINI_BD + 0x4),
6612                tr32(RCVDBDI_MINI_BD + 0x8),
6613                tr32(RCVDBDI_MINI_BD + 0xc));
6614
6615         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6616         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6617         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6618         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6619         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6620                val32, val32_2, val32_3, val32_4);
6621
6622         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6623         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6624         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6625         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6626         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6627                val32, val32_2, val32_3, val32_4);
6628
6629         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6630         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6631         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6632         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6633         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6634         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6635                val32, val32_2, val32_3, val32_4, val32_5);
6636
6637         /* SW status block */
6638         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6639                tp->hw_status->status,
6640                tp->hw_status->status_tag,
6641                tp->hw_status->rx_jumbo_consumer,
6642                tp->hw_status->rx_consumer,
6643                tp->hw_status->rx_mini_consumer,
6644                tp->hw_status->idx[0].rx_producer,
6645                tp->hw_status->idx[0].tx_consumer);
6646
6647         /* SW statistics block */
6648         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6649                ((u32 *)tp->hw_stats)[0],
6650                ((u32 *)tp->hw_stats)[1],
6651                ((u32 *)tp->hw_stats)[2],
6652                ((u32 *)tp->hw_stats)[3]);
6653
6654         /* Mailboxes */
6655         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6656                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6657                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6658                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6659                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6660
6661         /* NIC side send descriptors. */
6662         for (i = 0; i < 6; i++) {
6663                 unsigned long txd;
6664
6665                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6666                         + (i * sizeof(struct tg3_tx_buffer_desc));
6667                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6668                        i,
6669                        readl(txd + 0x0), readl(txd + 0x4),
6670                        readl(txd + 0x8), readl(txd + 0xc));
6671         }
6672
6673         /* NIC side RX descriptors. */
6674         for (i = 0; i < 6; i++) {
6675                 unsigned long rxd;
6676
6677                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6678                         + (i * sizeof(struct tg3_rx_buffer_desc));
6679                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6680                        i,
6681                        readl(rxd + 0x0), readl(rxd + 0x4),
6682                        readl(rxd + 0x8), readl(rxd + 0xc));
6683                 rxd += (4 * sizeof(u32));
6684                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6685                        i,
6686                        readl(rxd + 0x0), readl(rxd + 0x4),
6687                        readl(rxd + 0x8), readl(rxd + 0xc));
6688         }
6689
6690         for (i = 0; i < 6; i++) {
6691                 unsigned long rxd;
6692
6693                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6694                         + (i * sizeof(struct tg3_rx_buffer_desc));
6695                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6696                        i,
6697                        readl(rxd + 0x0), readl(rxd + 0x4),
6698                        readl(rxd + 0x8), readl(rxd + 0xc));
6699                 rxd += (4 * sizeof(u32));
6700                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6701                        i,
6702                        readl(rxd + 0x0), readl(rxd + 0x4),
6703                        readl(rxd + 0x8), readl(rxd + 0xc));
6704         }
6705 }
6706 #endif
6707
6708 static struct net_device_stats *tg3_get_stats(struct net_device *);
6709 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6710
6711 static int tg3_close(struct net_device *dev)
6712 {
6713         struct tg3 *tp = netdev_priv(dev);
6714
6715         netif_stop_queue(dev);
6716
6717         del_timer_sync(&tp->timer);
6718
6719         tg3_full_lock(tp, 1);
6720 #if 0
6721         tg3_dump_state(tp);
6722 #endif
6723
6724         tg3_disable_ints(tp);
6725
6726         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6727         tg3_free_rings(tp);
6728         tp->tg3_flags &=
6729                 ~(TG3_FLAG_INIT_COMPLETE |
6730                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6731         netif_carrier_off(tp->dev);
6732
6733         tg3_full_unlock(tp);
6734
6735         free_irq(tp->pdev->irq, dev);
6736         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6737                 pci_disable_msi(tp->pdev);
6738                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6739         }
6740
6741         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6742                sizeof(tp->net_stats_prev));
6743         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6744                sizeof(tp->estats_prev));
6745
6746         tg3_free_consistent(tp);
6747
6748         return 0;
6749 }
6750
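/* Hardware statistics counters are 64 bits wide (high/low pair).  On
 * 32-bit hosts only the low word is returned, since unsigned long
 * cannot hold the full value.
 */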
6751 static inline unsigned long get_stat64(tg3_stat64_t *val)
6752 {
6753         unsigned long ret;
6754
6755 #if (BITS_PER_LONG == 32)
6756         ret = val->low;
6757 #else
6758         ret = ((u64)val->high << 32) | ((u64)val->low);
6759 #endif
6760         return ret;
6761 }
6762
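/* On 5700/5701 with a copper PHY the CRC error count is read from PHY
 * register 0x14 (exposed by setting bit 15 of register 0x1e) and
 * accumulated in tp->phy_crc_errors.  All other configurations use
 * the MAC's rx_fcs_errors hardware counter.
 */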
6763 static unsigned long calc_crc_errors(struct tg3 *tp)
6764 {
6765         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6766
6767         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6768             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6769              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6770                 u32 val;
6771
6772                 spin_lock_bh(&tp->lock);
6773                 if (!tg3_readphy(tp, 0x1e, &val)) {
6774                         tg3_writephy(tp, 0x1e, val | 0x8000);
6775                         tg3_readphy(tp, 0x14, &val);
6776                 } else
6777                         val = 0;
6778                 spin_unlock_bh(&tp->lock);
6779
6780                 tp->phy_crc_errors += val;
6781
6782                 return tp->phy_crc_errors;
6783         }
6784
6785         return get_stat64(&hw_stats->rx_fcs_errors);
6786 }
6787
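/* Each ethtool counter is reported as the snapshot saved at the last
 * tg3_close() (estats_prev) plus the live hardware counter, so the
 * values keep accumulating across down/up cycles.
 */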
6788 #define ESTAT_ADD(member) \
6789         estats->member =        old_estats->member + \
6790                                 get_stat64(&hw_stats->member)
6791
6792 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6793 {
6794         struct tg3_ethtool_stats *estats = &tp->estats;
6795         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6796         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6797
6798         if (!hw_stats)
6799                 return old_estats;
6800
6801         ESTAT_ADD(rx_octets);
6802         ESTAT_ADD(rx_fragments);
6803         ESTAT_ADD(rx_ucast_packets);
6804         ESTAT_ADD(rx_mcast_packets);
6805         ESTAT_ADD(rx_bcast_packets);
6806         ESTAT_ADD(rx_fcs_errors);
6807         ESTAT_ADD(rx_align_errors);
6808         ESTAT_ADD(rx_xon_pause_rcvd);
6809         ESTAT_ADD(rx_xoff_pause_rcvd);
6810         ESTAT_ADD(rx_mac_ctrl_rcvd);
6811         ESTAT_ADD(rx_xoff_entered);
6812         ESTAT_ADD(rx_frame_too_long_errors);
6813         ESTAT_ADD(rx_jabbers);
6814         ESTAT_ADD(rx_undersize_packets);
6815         ESTAT_ADD(rx_in_length_errors);
6816         ESTAT_ADD(rx_out_length_errors);
6817         ESTAT_ADD(rx_64_or_less_octet_packets);
6818         ESTAT_ADD(rx_65_to_127_octet_packets);
6819         ESTAT_ADD(rx_128_to_255_octet_packets);
6820         ESTAT_ADD(rx_256_to_511_octet_packets);
6821         ESTAT_ADD(rx_512_to_1023_octet_packets);
6822         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6823         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6824         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6825         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6826         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6827
6828         ESTAT_ADD(tx_octets);
6829         ESTAT_ADD(tx_collisions);
6830         ESTAT_ADD(tx_xon_sent);
6831         ESTAT_ADD(tx_xoff_sent);
6832         ESTAT_ADD(tx_flow_control);
6833         ESTAT_ADD(tx_mac_errors);
6834         ESTAT_ADD(tx_single_collisions);
6835         ESTAT_ADD(tx_mult_collisions);
6836         ESTAT_ADD(tx_deferred);
6837         ESTAT_ADD(tx_excessive_collisions);
6838         ESTAT_ADD(tx_late_collisions);
6839         ESTAT_ADD(tx_collide_2times);
6840         ESTAT_ADD(tx_collide_3times);
6841         ESTAT_ADD(tx_collide_4times);
6842         ESTAT_ADD(tx_collide_5times);
6843         ESTAT_ADD(tx_collide_6times);
6844         ESTAT_ADD(tx_collide_7times);
6845         ESTAT_ADD(tx_collide_8times);
6846         ESTAT_ADD(tx_collide_9times);
6847         ESTAT_ADD(tx_collide_10times);
6848         ESTAT_ADD(tx_collide_11times);
6849         ESTAT_ADD(tx_collide_12times);
6850         ESTAT_ADD(tx_collide_13times);
6851         ESTAT_ADD(tx_collide_14times);
6852         ESTAT_ADD(tx_collide_15times);
6853         ESTAT_ADD(tx_ucast_packets);
6854         ESTAT_ADD(tx_mcast_packets);
6855         ESTAT_ADD(tx_bcast_packets);
6856         ESTAT_ADD(tx_carrier_sense_errors);
6857         ESTAT_ADD(tx_discards);
6858         ESTAT_ADD(tx_errors);
6859
6860         ESTAT_ADD(dma_writeq_full);
6861         ESTAT_ADD(dma_write_prioq_full);
6862         ESTAT_ADD(rxbds_empty);
6863         ESTAT_ADD(rx_discards);
6864         ESTAT_ADD(rx_errors);
6865         ESTAT_ADD(rx_threshold_hit);
6866
6867         ESTAT_ADD(dma_readq_full);
6868         ESTAT_ADD(dma_read_prioq_full);
6869         ESTAT_ADD(tx_comp_queue_full);
6870
6871         ESTAT_ADD(ring_set_send_prod_index);
6872         ESTAT_ADD(ring_status_update);
6873         ESTAT_ADD(nic_irqs);
6874         ESTAT_ADD(nic_avoided_irqs);
6875         ESTAT_ADD(nic_tx_threshold_hit);
6876
6877         return estats;
6878 }
6879
6880 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6881 {
6882         struct tg3 *tp = netdev_priv(dev);
6883         struct net_device_stats *stats = &tp->net_stats;
6884         struct net_device_stats *old_stats = &tp->net_stats_prev;
6885         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6886
6887         if (!hw_stats)
6888                 return old_stats;
6889
6890         stats->rx_packets = old_stats->rx_packets +
6891                 get_stat64(&hw_stats->rx_ucast_packets) +
6892                 get_stat64(&hw_stats->rx_mcast_packets) +
6893                 get_stat64(&hw_stats->rx_bcast_packets);
6894                 
6895         stats->tx_packets = old_stats->tx_packets +
6896                 get_stat64(&hw_stats->tx_ucast_packets) +
6897                 get_stat64(&hw_stats->tx_mcast_packets) +
6898                 get_stat64(&hw_stats->tx_bcast_packets);
6899
6900         stats->rx_bytes = old_stats->rx_bytes +
6901                 get_stat64(&hw_stats->rx_octets);
6902         stats->tx_bytes = old_stats->tx_bytes +
6903                 get_stat64(&hw_stats->tx_octets);
6904
6905         stats->rx_errors = old_stats->rx_errors +
6906                 get_stat64(&hw_stats->rx_errors);
6907         stats->tx_errors = old_stats->tx_errors +
6908                 get_stat64(&hw_stats->tx_errors) +
6909                 get_stat64(&hw_stats->tx_mac_errors) +
6910                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6911                 get_stat64(&hw_stats->tx_discards);
6912
6913         stats->multicast = old_stats->multicast +
6914                 get_stat64(&hw_stats->rx_mcast_packets);
6915         stats->collisions = old_stats->collisions +
6916                 get_stat64(&hw_stats->tx_collisions);
6917
6918         stats->rx_length_errors = old_stats->rx_length_errors +
6919                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6920                 get_stat64(&hw_stats->rx_undersize_packets);
6921
6922         stats->rx_over_errors = old_stats->rx_over_errors +
6923                 get_stat64(&hw_stats->rxbds_empty);
6924         stats->rx_frame_errors = old_stats->rx_frame_errors +
6925                 get_stat64(&hw_stats->rx_align_errors);
6926         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6927                 get_stat64(&hw_stats->tx_discards);
6928         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6929                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6930
6931         stats->rx_crc_errors = old_stats->rx_crc_errors +
6932                 calc_crc_errors(tp);
6933
6934         stats->rx_missed_errors = old_stats->rx_missed_errors +
6935                 get_stat64(&hw_stats->rx_discards);
6936
6937         return stats;
6938 }
6939
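/* Bit-reflected CRC-32 (polynomial 0xedb88320), i.e. the standard
 * Ethernet FCS, computed LSB first over the buffer.  Used both to
 * hash multicast addresses into the 128-bit MAC hash filter and to
 * verify the NVRAM checksums in tg3_test_nvram().
 */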
6940 static inline u32 calc_crc(unsigned char *buf, int len)
6941 {
6942         u32 reg;
6943         u32 tmp;
6944         int j, k;
6945
6946         reg = 0xffffffff;
6947
6948         for (j = 0; j < len; j++) {
6949                 reg ^= buf[j];
6950
6951                 for (k = 0; k < 8; k++) {
6952                         tmp = reg & 0x01;
6953
6954                         reg >>= 1;
6955
6956                         if (tmp) {
6957                                 reg ^= 0xedb88320;
6958                         }
6959                 }
6960         }
6961
6962         return ~reg;
6963 }
6964
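/* Set or clear all 128 bits of the MAC multicast hash filter:
 * all-ones accepts every multicast frame, all-zeroes rejects them.
 */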
6965 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6966 {
6967         /* accept or reject all multicast frames */
6968         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6969         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6970         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6971         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6972 }
6973
6974 static void __tg3_set_rx_mode(struct net_device *dev)
6975 {
6976         struct tg3 *tp = netdev_priv(dev);
6977         u32 rx_mode;
6978
6979         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6980                                   RX_MODE_KEEP_VLAN_TAG);
6981
6982         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6983          * flag clear.
6984          */
6985 #if TG3_VLAN_TAG_USED
6986         if (!tp->vlgrp &&
6987             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6988                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6989 #else
6990         /* By definition, VLAN is always disabled in this
6991          * case.
6992          */
6993         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6994                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6995 #endif
6996
6997         if (dev->flags & IFF_PROMISC) {
6998                 /* Promiscuous mode. */
6999                 rx_mode |= RX_MODE_PROMISC;
7000         } else if (dev->flags & IFF_ALLMULTI) {
7001                 /* Accept all multicast. */
7002                 tg3_set_multi(tp, 1);
7003         } else if (dev->mc_count < 1) {
7004                 /* Reject all multicast. */
7005                 tg3_set_multi(tp, 0);
7006         } else {
7007                 /* Accept one or more multicast(s). */
7008                 struct dev_mc_list *mclist;
7009                 unsigned int i;
7010                 u32 mc_filter[4] = { 0, };
7011                 u32 regidx;
7012                 u32 bit;
7013                 u32 crc;
7014
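                /* The low 7 bits of the complemented CRC select one of
                 * the 128 hash filter bits: bits 6:5 pick one of the
                 * four MAC_HASH_REG registers, bits 4:0 the bit within
                 * that register.
                 */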
7015                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7016                      i++, mclist = mclist->next) {
7017
7018                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7019                         bit = ~crc & 0x7f;
7020                         regidx = (bit & 0x60) >> 5;
7021                         bit &= 0x1f;
7022                         mc_filter[regidx] |= (1 << bit);
7023                 }
7024
7025                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7026                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7027                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7028                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7029         }
7030
7031         if (rx_mode != tp->rx_mode) {
7032                 tp->rx_mode = rx_mode;
7033                 tw32_f(MAC_RX_MODE, rx_mode);
7034                 udelay(10);
7035         }
7036 }
7037
7038 static void tg3_set_rx_mode(struct net_device *dev)
7039 {
7040         struct tg3 *tp = netdev_priv(dev);
7041
7042         tg3_full_lock(tp, 0);
7043         __tg3_set_rx_mode(dev);
7044         tg3_full_unlock(tp);
7045 }
7046
7047 #define TG3_REGDUMP_LEN         (32 * 1024)
7048
7049 static int tg3_get_regs_len(struct net_device *dev)
7050 {
7051         return TG3_REGDUMP_LEN;
7052 }
7053
7054 static void tg3_get_regs(struct net_device *dev,
7055                 struct ethtool_regs *regs, void *_p)
7056 {
7057         u32 *p = _p;
7058         struct tg3 *tp = netdev_priv(dev);
7059         u8 *orig_p = _p;
7060         int i;
7061
7062         regs->version = 0;
7063
7064         memset(p, 0, TG3_REGDUMP_LEN);
7065
7066         tg3_full_lock(tp, 0);
7067
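/* The dump preserves register offsets: each value is stored at its own
 * offset within the 32K buffer.  GET_REG32_LOOP copies a contiguous
 * block of registers, GET_REG32_1 a single one; regions that are not
 * dumped stay zero from the memset() above.
 */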
7068 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7069 #define GET_REG32_LOOP(base,len)                \
7070 do {    p = (u32 *)(orig_p + (base));           \
7071         for (i = 0; i < len; i += 4)            \
7072                 __GET_REG32((base) + i);        \
7073 } while (0)
7074 #define GET_REG32_1(reg)                        \
7075 do {    p = (u32 *)(orig_p + (reg));            \
7076         __GET_REG32((reg));                     \
7077 } while (0)
7078
7079         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7080         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7081         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7082         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7083         GET_REG32_1(SNDDATAC_MODE);
7084         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7085         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7086         GET_REG32_1(SNDBDC_MODE);
7087         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7088         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7089         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7090         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7091         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7092         GET_REG32_1(RCVDCC_MODE);
7093         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7094         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7095         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7096         GET_REG32_1(MBFREE_MODE);
7097         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7098         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7099         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7100         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7101         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7102         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7103         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7104         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7105         GET_REG32_LOOP(FTQ_RESET, 0x120);
7106         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7107         GET_REG32_1(DMAC_MODE);
7108         GET_REG32_LOOP(GRC_MODE, 0x4c);
7109         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7110                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7111
7112 #undef __GET_REG32
7113 #undef GET_REG32_LOOP
7114 #undef GET_REG32_1
7115
7116         tg3_full_unlock(tp);
7117 }
7118
7119 static int tg3_get_eeprom_len(struct net_device *dev)
7120 {
7121         struct tg3 *tp = netdev_priv(dev);
7122
7123         return tp->nvram_size;
7124 }
7125
7126 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7127
7128 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7129 {
7130         struct tg3 *tp = netdev_priv(dev);
7131         int ret;
7132         u8  *pd;
7133         u32 i, offset, len, val, b_offset, b_count;
7134
7135         offset = eeprom->offset;
7136         len = eeprom->len;
7137         eeprom->len = 0;
7138
7139         eeprom->magic = TG3_EEPROM_MAGIC;
7140
7141         if (offset & 3) {
7142                 /* adjustments to start on required 4 byte boundary */
7143                 b_offset = offset & 3;
7144                 b_count = 4 - b_offset;
7145                 if (b_count > len) {
7146                         /* i.e. offset=1 len=2 */
7147                         b_count = len;
7148                 }
7149                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7150                 if (ret)
7151                         return ret;
7152                 val = cpu_to_le32(val);
7153                 memcpy(data, ((char*)&val) + b_offset, b_count);
7154                 len -= b_count;
7155                 offset += b_count;
7156                 eeprom->len += b_count;
7157         }
7158
7159         /* read bytes up to the last 4-byte boundary */
7160         pd = &data[eeprom->len];
7161         for (i = 0; i < (len - (len & 3)); i += 4) {
7162                 ret = tg3_nvram_read(tp, offset + i, &val);
7163                 if (ret) {
7164                         eeprom->len += i;
7165                         return ret;
7166                 }
7167                 val = cpu_to_le32(val);
7168                 memcpy(pd + i, &val, 4);
7169         }
7170         eeprom->len += i;
7171
7172         if (len & 3) {
7173                 /* read last bytes not ending on 4 byte boundary */
7174                 pd = &data[eeprom->len];
7175                 b_count = len & 3;
7176                 b_offset = offset + len - b_count;
7177                 ret = tg3_nvram_read(tp, b_offset, &val);
7178                 if (ret)
7179                         return ret;
7180                 val = cpu_to_le32(val);
7181                 memcpy(pd, ((char*)&val), b_count);
7182                 eeprom->len += b_count;
7183         }
7184         return 0;
7185 }
7186
7187 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7188
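/* NVRAM is written in 32-bit units.  If the request does not start or
 * end on a 4-byte boundary, the partial words at either end are read
 * back first and merged with the caller's data in a temporary buffer
 * before the block write.
 */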
7189 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7190 {
7191         struct tg3 *tp = netdev_priv(dev);
7192         int ret;
7193         u32 offset, len, b_offset, odd_len, start, end;
7194         u8 *buf;
7195
7196         if (eeprom->magic != TG3_EEPROM_MAGIC)
7197                 return -EINVAL;
7198
7199         offset = eeprom->offset;
7200         len = eeprom->len;
7201
7202         if ((b_offset = (offset & 3))) {
7203                 /* adjustments to start on required 4 byte boundary */
7204                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7205                 if (ret)
7206                         return ret;
7207                 start = cpu_to_le32(start);
7208                 len += b_offset;
7209                 offset &= ~3;
7210                 if (len < 4)
7211                         len = 4;
7212         }
7213
7214         odd_len = 0;
7215         if (len & 3) {
7216                 /* adjustments to end on required 4 byte boundary */
7217                 odd_len = 1;
7218                 len = (len + 3) & ~3;
7219                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7220                 if (ret)
7221                         return ret;
7222                 end = cpu_to_le32(end);
7223         }
7224
7225         buf = data;
7226         if (b_offset || odd_len) {
7227                 buf = kmalloc(len, GFP_KERNEL);
7228                 if (!buf)
7229                         return -ENOMEM;
7230                 if (b_offset)
7231                         memcpy(buf, &start, 4);
7232                 if (odd_len)
7233                         memcpy(buf+len-4, &end, 4);
7234                 memcpy(buf + b_offset, data, eeprom->len);
7235         }
7236
7237         ret = tg3_nvram_write_block(tp, offset, len, buf);
7238
7239         if (buf != data)
7240                 kfree(buf);
7241
7242         return ret;
7243 }
7244
7245 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7246 {
7247         struct tg3 *tp = netdev_priv(dev);
7248   
7249         cmd->supported = (SUPPORTED_Autoneg);
7250
7251         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7252                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7253                                    SUPPORTED_1000baseT_Full);
7254
7255         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7256                 cmd->supported |= (SUPPORTED_100baseT_Half |
7257                                   SUPPORTED_100baseT_Full |
7258                                   SUPPORTED_10baseT_Half |
7259                                   SUPPORTED_10baseT_Full |
7260                                   SUPPORTED_MII);
7261         else
7262                 cmd->supported |= SUPPORTED_FIBRE;
7263   
7264         cmd->advertising = tp->link_config.advertising;
7265         if (netif_running(dev)) {
7266                 cmd->speed = tp->link_config.active_speed;
7267                 cmd->duplex = tp->link_config.active_duplex;
7268         }
7269         cmd->port = 0;
7270         cmd->phy_address = PHY_ADDR;
7271         cmd->transceiver = 0;
7272         cmd->autoneg = tp->link_config.autoneg;
7273         cmd->maxtxpkt = 0;
7274         cmd->maxrxpkt = 0;
7275         return 0;
7276 }
7277   
7278 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7279 {
7280         struct tg3 *tp = netdev_priv(dev);
7281   
7282         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7283                 /* These are the only valid advertisement bits.  */
7284                 if (cmd->autoneg == AUTONEG_ENABLE &&
7285                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7286                                           ADVERTISED_1000baseT_Full |
7287                                           ADVERTISED_Autoneg |
7288                                           ADVERTISED_FIBRE)))
7289                         return -EINVAL;
7290                 /* Fiber can only do SPEED_1000.  */
7291                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7292                          (cmd->speed != SPEED_1000))
7293                         return -EINVAL;
7294         /* Copper cannot force SPEED_1000.  */
7295         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7296                    (cmd->speed == SPEED_1000))
7297                 return -EINVAL;
7298         else if ((cmd->speed == SPEED_1000) &&
7299                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7300                 return -EINVAL;
7301
7302         tg3_full_lock(tp, 0);
7303
7304         tp->link_config.autoneg = cmd->autoneg;
7305         if (cmd->autoneg == AUTONEG_ENABLE) {
7306                 tp->link_config.advertising = cmd->advertising;
7307                 tp->link_config.speed = SPEED_INVALID;
7308                 tp->link_config.duplex = DUPLEX_INVALID;
7309         } else {
7310                 tp->link_config.advertising = 0;
7311                 tp->link_config.speed = cmd->speed;
7312                 tp->link_config.duplex = cmd->duplex;
7313         }
7314   
7315         if (netif_running(dev))
7316                 tg3_setup_phy(tp, 1);
7317
7318         tg3_full_unlock(tp);
7319   
7320         return 0;
7321 }
7322   
7323 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7324 {
7325         struct tg3 *tp = netdev_priv(dev);
7326   
7327         strcpy(info->driver, DRV_MODULE_NAME);
7328         strcpy(info->version, DRV_MODULE_VERSION);
7329         strcpy(info->bus_info, pci_name(tp->pdev));
7330 }
7331   
7332 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7333 {
7334         struct tg3 *tp = netdev_priv(dev);
7335   
7336         wol->supported = WAKE_MAGIC;
7337         wol->wolopts = 0;
7338         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7339                 wol->wolopts = WAKE_MAGIC;
7340         memset(&wol->sopass, 0, sizeof(wol->sopass));
7341 }
7342   
7343 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7344 {
7345         struct tg3 *tp = netdev_priv(dev);
7346   
7347         if (wol->wolopts & ~WAKE_MAGIC)
7348                 return -EINVAL;
7349         if ((wol->wolopts & WAKE_MAGIC) &&
7350             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7351             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7352                 return -EINVAL;
7353   
7354         spin_lock_bh(&tp->lock);
7355         if (wol->wolopts & WAKE_MAGIC)
7356                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7357         else
7358                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7359         spin_unlock_bh(&tp->lock);
7360   
7361         return 0;
7362 }
7363   
7364 static u32 tg3_get_msglevel(struct net_device *dev)
7365 {
7366         struct tg3 *tp = netdev_priv(dev);
7367         return tp->msg_enable;
7368 }
7369   
7370 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7371 {
7372         struct tg3 *tp = netdev_priv(dev);
7373         tp->msg_enable = value;
7374 }
7375   
7376 #if TG3_TSO_SUPPORT != 0
7377 static int tg3_set_tso(struct net_device *dev, u32 value)
7378 {
7379         struct tg3 *tp = netdev_priv(dev);
7380
7381         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7382                 if (value)
7383                         return -EINVAL;
7384                 return 0;
7385         }
7386         return ethtool_op_set_tso(dev, value);
7387 }
7388 #endif
7389   
7390 static int tg3_nway_reset(struct net_device *dev)
7391 {
7392         struct tg3 *tp = netdev_priv(dev);
7393         u32 bmcr;
7394         int r;
7395   
7396         if (!netif_running(dev))
7397                 return -EAGAIN;
7398
7399         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7400                 return -EINVAL;
7401
7402         spin_lock_bh(&tp->lock);
7403         r = -EINVAL;
7404         tg3_readphy(tp, MII_BMCR, &bmcr);
7405         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7406             ((bmcr & BMCR_ANENABLE) ||
7407              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7408                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7409                                            BMCR_ANENABLE);
7410                 r = 0;
7411         }
7412         spin_unlock_bh(&tp->lock);
7413   
7414         return r;
7415 }
7416   
7417 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7418 {
7419         struct tg3 *tp = netdev_priv(dev);
7420   
7421         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7422         ering->rx_mini_max_pending = 0;
7423         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7424
7425         ering->rx_pending = tp->rx_pending;
7426         ering->rx_mini_pending = 0;
7427         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7428         ering->tx_pending = tp->tx_pending;
7429 }
7430   
7431 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7432 {
7433         struct tg3 *tp = netdev_priv(dev);
7434         int irq_sync = 0;
7435   
7436         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7437             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7438             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7439                 return -EINVAL;
7440   
7441         if (netif_running(dev)) {
7442                 tg3_netif_stop(tp);
7443                 irq_sync = 1;
7444         }
7445
7446         tg3_full_lock(tp, irq_sync);
7447   
7448         tp->rx_pending = ering->rx_pending;
7449
7450         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7451             tp->rx_pending > 63)
7452                 tp->rx_pending = 63;
7453         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7454         tp->tx_pending = ering->tx_pending;
7455
7456         if (netif_running(dev)) {
7457                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7458                 tg3_init_hw(tp);
7459                 tg3_netif_start(tp);
7460         }
7461
7462         tg3_full_unlock(tp);
7463   
7464         return 0;
7465 }
7466   
7467 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7468 {
7469         struct tg3 *tp = netdev_priv(dev);
7470   
7471         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7472         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7473         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7474 }
7475   
7476 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7477 {
7478         struct tg3 *tp = netdev_priv(dev);
7479         int irq_sync = 0;
7480   
7481         if (netif_running(dev)) {
7482                 tg3_netif_stop(tp);
7483                 irq_sync = 1;
7484         }
7485
7486         tg3_full_lock(tp, irq_sync);
7487
7488         if (epause->autoneg)
7489                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7490         else
7491                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7492         if (epause->rx_pause)
7493                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7494         else
7495                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7496         if (epause->tx_pause)
7497                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7498         else
7499                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7500
7501         if (netif_running(dev)) {
7502                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7503                 tg3_init_hw(tp);
7504                 tg3_netif_start(tp);
7505         }
7506
7507         tg3_full_unlock(tp);
7508   
7509         return 0;
7510 }
7511   
7512 static u32 tg3_get_rx_csum(struct net_device *dev)
7513 {
7514         struct tg3 *tp = netdev_priv(dev);
7515         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7516 }
7517   
7518 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7519 {
7520         struct tg3 *tp = netdev_priv(dev);
7521   
7522         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7523                 if (data != 0)
7524                         return -EINVAL;
7525                 return 0;
7526         }
7527   
7528         spin_lock_bh(&tp->lock);
7529         if (data)
7530                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7531         else
7532                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7533         spin_unlock_bh(&tp->lock);
7534   
7535         return 0;
7536 }
7537   
7538 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7539 {
7540         struct tg3 *tp = netdev_priv(dev);
7541   
7542         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7543                 if (data != 0)
7544                         return -EINVAL;
7545                 return 0;
7546         }
7547   
7548         if (data)
7549                 dev->features |= NETIF_F_IP_CSUM;
7550         else
7551                 dev->features &= ~NETIF_F_IP_CSUM;
7552
7553         return 0;
7554 }
7555
7556 static int tg3_get_stats_count (struct net_device *dev)
7557 {
7558         return TG3_NUM_STATS;
7559 }
7560
7561 static int tg3_get_test_count (struct net_device *dev)
7562 {
7563         return TG3_NUM_TEST;
7564 }
7565
7566 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7567 {
7568         switch (stringset) {
7569         case ETH_SS_STATS:
7570                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7571                 break;
7572         case ETH_SS_TEST:
7573                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7574                 break;
7575         default:
7576                 WARN_ON(1);     /* we need a WARN() */
7577                 break;
7578         }
7579 }
7580
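/* Identify the adapter by blinking its LEDs: alternate between forcing
 * all LEDs on and traffic-override-only every 500 msec for 'data'
 * seconds (default 2), then restore the original LED control value.
 */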
7581 static int tg3_phys_id(struct net_device *dev, u32 data)
7582 {
7583         struct tg3 *tp = netdev_priv(dev);
7584         int i;
7585
7586         if (!netif_running(tp->dev))
7587                 return -EAGAIN;
7588
7589         if (data == 0)
7590                 data = 2;
7591
7592         for (i = 0; i < (data * 2); i++) {
7593                 if ((i % 2) == 0)
7594                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7595                                            LED_CTRL_1000MBPS_ON |
7596                                            LED_CTRL_100MBPS_ON |
7597                                            LED_CTRL_10MBPS_ON |
7598                                            LED_CTRL_TRAFFIC_OVERRIDE |
7599                                            LED_CTRL_TRAFFIC_BLINK |
7600                                            LED_CTRL_TRAFFIC_LED);
7601         
7602                 else
7603                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7604                                            LED_CTRL_TRAFFIC_OVERRIDE);
7605
7606                 if (msleep_interruptible(500))
7607                         break;
7608         }
7609         tw32(MAC_LED_CTRL, tp->led_ctrl);
7610         return 0;
7611 }
7612
7613 static void tg3_get_ethtool_stats (struct net_device *dev,
7614                                    struct ethtool_stats *estats, u64 *tmp_stats)
7615 {
7616         struct tg3 *tp = netdev_priv(dev);
7617         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7618 }
7619
7620 #define NVRAM_TEST_SIZE 0x100
7621
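/* NVRAM self-test: read the first 256 bytes and verify the EEPROM
 * magic word at offset 0, the bootstrap checksum at offset 0x10
 * (CRC over bytes 0x00-0x0f) and the manufacturing block checksum at
 * offset 0xfc (CRC over bytes 0x74-0xfb).
 */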
7622 static int tg3_test_nvram(struct tg3 *tp)
7623 {
7624         u32 *buf, csum;
7625         int i, j, err = 0;
7626
7627         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7628         if (buf == NULL)
7629                 return -ENOMEM;
7630
7631         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7632                 u32 val;
7633
7634                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7635                         break;
7636                 buf[j] = cpu_to_le32(val);
7637         }
7638         if (i < NVRAM_TEST_SIZE)
7639                 goto out;
7640
7641         err = -EIO;
7642         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7643                 goto out;
7644
7645         /* Bootstrap checksum at offset 0x10 */
7646         csum = calc_crc((unsigned char *) buf, 0x10);
7647         if (csum != cpu_to_le32(buf[0x10/4]))
7648                 goto out;
7649
7650         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7651         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7652         if (csum != cpu_to_le32(buf[0xfc/4]))
7653                 goto out;
7654
7655         err = 0;
7656
7657 out:
7658         kfree(buf);
7659         return err;
7660 }
7661
7662 #define TG3_SERDES_TIMEOUT_SEC  2
7663 #define TG3_COPPER_TIMEOUT_SEC  6
7664
7665 static int tg3_test_link(struct tg3 *tp)
7666 {
7667         int i, max;
7668
7669         if (!netif_running(tp->dev))
7670                 return -ENODEV;
7671
7672         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7673                 max = TG3_SERDES_TIMEOUT_SEC;
7674         else
7675                 max = TG3_COPPER_TIMEOUT_SEC;
7676
7677         for (i = 0; i < max; i++) {
7678                 if (netif_carrier_ok(tp->dev))
7679                         return 0;
7680
7681                 if (msleep_interruptible(1000))
7682                         break;
7683         }
7684
7685         return -EIO;
7686 }
7687
7688 /* Only test the commonly used registers */
7689 static int tg3_test_registers(struct tg3 *tp)
7690 {
7691         int i, is_5705;
7692         u32 offset, read_mask, write_mask, val, save_val, read_val;
7693         static struct {
7694                 u16 offset;
7695                 u16 flags;
7696 #define TG3_FL_5705     0x1
7697 #define TG3_FL_NOT_5705 0x2
7698 #define TG3_FL_NOT_5788 0x4
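/* Entries flagged TG3_FL_5705 are meant only for 5705-class chips,
 * TG3_FL_NOT_5705 only for the older chips, and TG3_FL_NOT_5788 is
 * intended to additionally skip the 5788.
 */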
7699                 u32 read_mask;
7700                 u32 write_mask;
7701         } reg_tbl[] = {
7702                 /* MAC Control Registers */
7703                 { MAC_MODE, TG3_FL_NOT_5705,
7704                         0x00000000, 0x00ef6f8c },
7705                 { MAC_MODE, TG3_FL_5705,
7706                         0x00000000, 0x01ef6b8c },
7707                 { MAC_STATUS, TG3_FL_NOT_5705,
7708                         0x03800107, 0x00000000 },
7709                 { MAC_STATUS, TG3_FL_5705,
7710                         0x03800100, 0x00000000 },
7711                 { MAC_ADDR_0_HIGH, 0x0000,
7712                         0x00000000, 0x0000ffff },
7713                 { MAC_ADDR_0_LOW, 0x0000,
7714                         0x00000000, 0xffffffff },
7715                 { MAC_RX_MTU_SIZE, 0x0000,
7716                         0x00000000, 0x0000ffff },
7717                 { MAC_TX_MODE, 0x0000,
7718                         0x00000000, 0x00000070 },
7719                 { MAC_TX_LENGTHS, 0x0000,
7720                         0x00000000, 0x00003fff },
7721                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7722                         0x00000000, 0x000007fc },
7723                 { MAC_RX_MODE, TG3_FL_5705,
7724                         0x00000000, 0x000007dc },
7725                 { MAC_HASH_REG_0, 0x0000,
7726                         0x00000000, 0xffffffff },
7727                 { MAC_HASH_REG_1, 0x0000,
7728                         0x00000000, 0xffffffff },
7729                 { MAC_HASH_REG_2, 0x0000,
7730                         0x00000000, 0xffffffff },
7731                 { MAC_HASH_REG_3, 0x0000,
7732                         0x00000000, 0xffffffff },
7733
7734                 /* Receive Data and Receive BD Initiator Control Registers. */
7735                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7736                         0x00000000, 0xffffffff },
7737                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7738                         0x00000000, 0xffffffff },
7739                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7740                         0x00000000, 0x00000003 },
7741                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7742                         0x00000000, 0xffffffff },
7743                 { RCVDBDI_STD_BD+0, 0x0000,
7744                         0x00000000, 0xffffffff },
7745                 { RCVDBDI_STD_BD+4, 0x0000,
7746                         0x00000000, 0xffffffff },
7747                 { RCVDBDI_STD_BD+8, 0x0000,
7748                         0x00000000, 0xffff0002 },
7749                 { RCVDBDI_STD_BD+0xc, 0x0000,
7750                         0x00000000, 0xffffffff },
7751         
7752                 /* Receive BD Initiator Control Registers. */
7753                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7754                         0x00000000, 0xffffffff },
7755                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7756                         0x00000000, 0x000003ff },
7757                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7758                         0x00000000, 0xffffffff },
7759
7760                 /* Host Coalescing Control Registers. */
7761                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7762                         0x00000000, 0x00000004 },
7763                 { HOSTCC_MODE, TG3_FL_5705,
7764                         0x00000000, 0x000000f6 },
7765                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7766                         0x00000000, 0xffffffff },
7767                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7768                         0x00000000, 0x000003ff },
7769                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7770                         0x00000000, 0xffffffff },
7771                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7772                         0x00000000, 0x000003ff },
7773                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7774                         0x00000000, 0xffffffff },
7775                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7776                         0x00000000, 0x000000ff },
7777                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7778                         0x00000000, 0xffffffff },
7779                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7780                         0x00000000, 0x000000ff },
7781                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7782                         0x00000000, 0xffffffff },
7783                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7784                         0x00000000, 0xffffffff },
7785                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7786                         0x00000000, 0xffffffff },
7787                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7788                         0x00000000, 0x000000ff },
7789                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7790                         0x00000000, 0xffffffff },
7791                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7792                         0x00000000, 0x000000ff },
7793                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7794                         0x00000000, 0xffffffff },
7795                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7796                         0x00000000, 0xffffffff },
7797                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7798                         0x00000000, 0xffffffff },
7799                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7800                         0x00000000, 0xffffffff },
7801                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7802                         0x00000000, 0xffffffff },
7803                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7804                         0xffffffff, 0x00000000 },
7805                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7806                         0xffffffff, 0x00000000 },
7807
7808                 /* Buffer Manager Control Registers. */
7809                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7810                         0x00000000, 0x007fff80 },
7811                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7812                         0x00000000, 0x007fffff },
7813                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7814                         0x00000000, 0x0000003f },
7815                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7816                         0x00000000, 0x000001ff },
7817                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7818                         0x00000000, 0x000001ff },
7819                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7820                         0xffffffff, 0x00000000 },
7821                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7822                         0xffffffff, 0x00000000 },
7823
7824                 /* Mailbox Registers */
7825                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7826                         0x00000000, 0x000001ff },
7827                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7828                         0x00000000, 0x000001ff },
7829                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7830                         0x00000000, 0x000007ff },
7831                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7832                         0x00000000, 0x000001ff },
7833
7834                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7835         };
7836
7837         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7838                 is_5705 = 1;
7839         else
7840                 is_5705 = 0;
7841
7842         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7843                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7844                         continue;
7845
7846                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7847                         continue;
7848
7849                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7850                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7851                         continue;
7852
7853                 offset = (u32) reg_tbl[i].offset;
7854                 read_mask = reg_tbl[i].read_mask;
7855                 write_mask = reg_tbl[i].write_mask;
7856
7857                 /* Save the original register content */
7858                 save_val = tr32(offset);
7859
7860                 /* Determine the read-only value. */
7861                 read_val = save_val & read_mask;
7862
7863                 /* Write zero to the register, then make sure the read-only bits
7864                  * are not changed and the read/write bits are all zeros.
7865                  */
7866                 tw32(offset, 0);
7867
7868                 val = tr32(offset);
7869
7870                 /* Test the read-only and read/write bits. */
7871                 if (((val & read_mask) != read_val) || (val & write_mask))
7872                         goto out;
7873
7874                 /* Write ones to all the bits defined by RdMask and WrMask, then
7875                  * make sure the read-only bits are not changed and the
7876                  * read/write bits are all ones.
7877                  */
7878                 tw32(offset, read_mask | write_mask);
7879
7880                 val = tr32(offset);
7881
7882                 /* Test the read-only bits. */
7883                 if ((val & read_mask) != read_val)
7884                         goto out;
7885
7886                 /* Test the read/write bits. */
7887                 if ((val & write_mask) != write_mask)
7888                         goto out;
7889
7890                 tw32(offset, save_val);
7891         }
7892
7893         return 0;
7894
7895 out:
7896         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7897         tw32(offset, save_val);
7898         return -EIO;
7899 }
7900
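     /* Write each test pattern to every dword in the given window of NIC
      * internal memory and read it back to verify each location.
      */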
7901 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7902 {
7903         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7904         int i;
7905         u32 j;
7906
7907         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7908                 for (j = 0; j < len; j += 4) {
7909                         u32 val;
7910
7911                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7912                         tg3_read_mem(tp, offset + j, &val);
7913                         if (val != test_pattern[i])
7914                                 return -EIO;
7915                 }
7916         }
7917         return 0;
7918 }
7919
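     /* Exercise the internal memory ranges appropriate for the chip type;
      * each table below is terminated by an offset of 0xffffffff.
      */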
7920 static int tg3_test_memory(struct tg3 *tp)
7921 {
7922         static struct mem_entry {
7923                 u32 offset;
7924                 u32 len;
7925         } mem_tbl_570x[] = {
7926                 { 0x00000000, 0x01000},
7927                 { 0x00002000, 0x1c000},
7928                 { 0xffffffff, 0x00000}
7929         }, mem_tbl_5705[] = {
7930                 { 0x00000100, 0x0000c},
7931                 { 0x00000200, 0x00008},
7932                 { 0x00000b50, 0x00400},
7933                 { 0x00004000, 0x00800},
7934                 { 0x00006000, 0x01000},
7935                 { 0x00008000, 0x02000},
7936                 { 0x00010000, 0x0e000},
7937                 { 0xffffffff, 0x00000}
7938         };
7939         struct mem_entry *mem_tbl;
7940         int err = 0;
7941         int i;
7942
7943         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7944                 mem_tbl = mem_tbl_5705;
7945         else
7946                 mem_tbl = mem_tbl_570x;
7947
7948         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7949                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7950                     mem_tbl[i].len)) != 0)
7951                         break;
7952         }
7953
7954         return err;
7955 }
7956
7957 #define TG3_MAC_LOOPBACK        0
7958 #define TG3_PHY_LOOPBACK        1
7959
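     /* Send a single 1514-byte frame addressed to ourselves through the
      * selected (MAC-internal or PHY) loopback path, poll the status block
      * until it shows up on the RX return ring, and then verify the payload
      * byte for byte.
      */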
7960 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7961 {
7962         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
7963         u32 desc_idx;
7964         struct sk_buff *skb, *rx_skb;
7965         u8 *tx_data;
7966         dma_addr_t map;
7967         int num_pkts, tx_len, rx_len, i, err;
7968         struct tg3_rx_buffer_desc *desc;
7969
7970         if (loopback_mode == TG3_MAC_LOOPBACK) {
7971                 /* HW erratum - MAC loopback fails in some cases on 5780.
7972                  * Normal traffic and PHY loopback are not affected by
7973                  * this erratum.
7974                  */
7975                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7976                         return 0;
7977
7978                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7979                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7980                            MAC_MODE_PORT_MODE_GMII;
7981                 tw32(MAC_MODE, mac_mode);
7982         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7983                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7984                                            BMCR_SPEED1000);
7985                 udelay(40);
7986                 /* reset to prevent losing 1st rx packet intermittently */
7987                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7988                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7989                         udelay(10);
7990                         tw32_f(MAC_RX_MODE, tp->rx_mode);
7991                 }
7992                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7993                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7994                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7995                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7996                 tw32(MAC_MODE, mac_mode);
7997         }
7998         else
7999                 return -EINVAL;
8000
8001         err = -EIO;
8002
8003         tx_len = 1514;
8004         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8005         tx_data = skb_put(skb, tx_len);
8006         memcpy(tx_data, tp->dev->dev_addr, 6);
8007         memset(tx_data + 6, 0x0, 8);
8008
8009         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8010
8011         for (i = 14; i < tx_len; i++)
8012                 tx_data[i] = (u8) (i & 0xff);
8013
8014         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8015
8016         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8017              HOSTCC_MODE_NOW);
8018
8019         udelay(10);
8020
8021         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8022
8023         num_pkts = 0;
8024
8025         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8026
8027         tp->tx_prod++;
8028         num_pkts++;
8029
8030         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8031                      tp->tx_prod);
8032         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8033
8034         udelay(10);
8035
8036         for (i = 0; i < 10; i++) {
8037                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8038                        HOSTCC_MODE_NOW);
8039
8040                 udelay(10);
8041
8042                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8043                 rx_idx = tp->hw_status->idx[0].rx_producer;
8044                 if ((tx_idx == tp->tx_prod) &&
8045                     (rx_idx == (rx_start_idx + num_pkts)))
8046                         break;
8047         }
8048
8049         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8050         dev_kfree_skb(skb);
8051
8052         if (tx_idx != tp->tx_prod)
8053                 goto out;
8054
8055         if (rx_idx != rx_start_idx + num_pkts)
8056                 goto out;
8057
8058         desc = &tp->rx_rcb[rx_start_idx];
8059         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8060         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8061         if (opaque_key != RXD_OPAQUE_RING_STD)
8062                 goto out;
8063
8064         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8065             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8066                 goto out;
8067
8068         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8069         if (rx_len != tx_len)
8070                 goto out;
8071
8072         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8073
8074         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8075         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8076
8077         for (i = 14; i < tx_len; i++) {
8078                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8079                         goto out;
8080         }
8081         err = 0;
8082
8083         /* tg3_free_rings will unmap and free the rx_skb */
8084 out:
8085         return err;
8086 }
8087
8088 #define TG3_MAC_LOOPBACK_FAILED         1
8089 #define TG3_PHY_LOOPBACK_FAILED         2
8090 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8091                                          TG3_PHY_LOOPBACK_FAILED)
8092
8093 static int tg3_test_loopback(struct tg3 *tp)
8094 {
8095         int err = 0;
8096
8097         if (!netif_running(tp->dev))
8098                 return TG3_LOOPBACK_FAILED;
8099
8100         tg3_reset_hw(tp);
8101
8102         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8103                 err |= TG3_MAC_LOOPBACK_FAILED;
8104         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8105                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8106                         err |= TG3_PHY_LOOPBACK_FAILED;
8107         }
8108
8109         return err;
8110 }
8111
8112 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8113                           u64 *data)
8114 {
8115         struct tg3 *tp = netdev_priv(dev);
8116
8117         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8118
8119         if (tg3_test_nvram(tp) != 0) {
8120                 etest->flags |= ETH_TEST_FL_FAILED;
8121                 data[0] = 1;
8122         }
8123         if (tg3_test_link(tp) != 0) {
8124                 etest->flags |= ETH_TEST_FL_FAILED;
8125                 data[1] = 1;
8126         }
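             /* The remaining tests are intrusive: quiesce the interface and
              * halt the chip before poking registers, memory and loopback,
              * then re-initialize the hardware when done.
              */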
8127         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8128                 int irq_sync = 0;
8129
8130                 if (netif_running(dev)) {
8131                         tg3_netif_stop(tp);
8132                         irq_sync = 1;
8133                 }
8134
8135                 tg3_full_lock(tp, irq_sync);
8136
8137                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8138                 tg3_nvram_lock(tp);
8139                 tg3_halt_cpu(tp, RX_CPU_BASE);
8140                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8141                         tg3_halt_cpu(tp, TX_CPU_BASE);
8142                 tg3_nvram_unlock(tp);
8143
8144                 if (tg3_test_registers(tp) != 0) {
8145                         etest->flags |= ETH_TEST_FL_FAILED;
8146                         data[2] = 1;
8147                 }
8148                 if (tg3_test_memory(tp) != 0) {
8149                         etest->flags |= ETH_TEST_FL_FAILED;
8150                         data[3] = 1;
8151                 }
8152                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8153                         etest->flags |= ETH_TEST_FL_FAILED;
8154
8155                 tg3_full_unlock(tp);
8156
8157                 if (tg3_test_interrupt(tp) != 0) {
8158                         etest->flags |= ETH_TEST_FL_FAILED;
8159                         data[5] = 1;
8160                 }
8161
8162                 tg3_full_lock(tp, 0);
8163
8164                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8165                 if (netif_running(dev)) {
8166                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8167                         tg3_init_hw(tp);
8168                         tg3_netif_start(tp);
8169                 }
8170
8171                 tg3_full_unlock(tp);
8172         }
8173 }
8174
8175 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8176 {
8177         struct mii_ioctl_data *data = if_mii(ifr);
8178         struct tg3 *tp = netdev_priv(dev);
8179         int err;
8180
8181         switch (cmd) {
8182         case SIOCGMIIPHY:
8183                 data->phy_id = PHY_ADDR;
8184
8185                 /* fallthru */
8186         case SIOCGMIIREG: {
8187                 u32 mii_regval;
8188
8189                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8190                         break;                  /* We have no PHY */
8191
8192                 spin_lock_bh(&tp->lock);
8193                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8194                 spin_unlock_bh(&tp->lock);
8195
8196                 data->val_out = mii_regval;
8197
8198                 return err;
8199         }
8200
8201         case SIOCSMIIREG:
8202                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8203                         break;                  /* We have no PHY */
8204
8205                 if (!capable(CAP_NET_ADMIN))
8206                         return -EPERM;
8207
8208                 spin_lock_bh(&tp->lock);
8209                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8210                 spin_unlock_bh(&tp->lock);
8211
8212                 return err;
8213
8214         default:
8215                 /* do nothing */
8216                 break;
8217         }
8218         return -EOPNOTSUPP;
8219 }
8220
8221 #if TG3_VLAN_TAG_USED
8222 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8223 {
8224         struct tg3 *tp = netdev_priv(dev);
8225
8226         tg3_full_lock(tp, 0);
8227
8228         tp->vlgrp = grp;
8229
8230         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8231         __tg3_set_rx_mode(dev);
8232
8233         tg3_full_unlock(tp);
8234 }
8235
8236 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8237 {
8238         struct tg3 *tp = netdev_priv(dev);
8239
8240         tg3_full_lock(tp, 0);
8241         if (tp->vlgrp)
8242                 tp->vlgrp->vlan_devices[vid] = NULL;
8243         tg3_full_unlock(tp);
8244 }
8245 #endif
8246
8247 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8248 {
8249         struct tg3 *tp = netdev_priv(dev);
8250
8251         memcpy(ec, &tp->coal, sizeof(*ec));
8252         return 0;
8253 }
8254
8255 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8256 {
8257         struct tg3 *tp = netdev_priv(dev);
8258         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8259         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8260
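             /* 5705 and newer chips do not support the per-irq tick and
              * statistics-block coalescing parameters, so their limits stay
              * at zero here and any nonzero request is rejected below.
              */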
8261         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8262                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8263                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8264                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8265                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8266         }
8267
8268         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8269             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8270             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8271             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8272             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8273             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8274             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8275             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8276             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8277             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8278                 return -EINVAL;
8279
8280         /* No rx interrupts will be generated if both are zero */
8281         if ((ec->rx_coalesce_usecs == 0) &&
8282             (ec->rx_max_coalesced_frames == 0))
8283                 return -EINVAL;
8284
8285         /* No tx interrupts will be generated if both are zero */
8286         if ((ec->tx_coalesce_usecs == 0) &&
8287             (ec->tx_max_coalesced_frames == 0))
8288                 return -EINVAL;
8289
8290         /* Only copy relevant parameters, ignore all others. */
8291         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8292         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8293         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8294         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8295         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8296         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8297         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8298         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8299         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8300
8301         if (netif_running(dev)) {
8302                 tg3_full_lock(tp, 0);
8303                 __tg3_set_coalesce(tp, &tp->coal);
8304                 tg3_full_unlock(tp);
8305         }
8306         return 0;
8307 }
8308
8309 static struct ethtool_ops tg3_ethtool_ops = {
8310         .get_settings           = tg3_get_settings,
8311         .set_settings           = tg3_set_settings,
8312         .get_drvinfo            = tg3_get_drvinfo,
8313         .get_regs_len           = tg3_get_regs_len,
8314         .get_regs               = tg3_get_regs,
8315         .get_wol                = tg3_get_wol,
8316         .set_wol                = tg3_set_wol,
8317         .get_msglevel           = tg3_get_msglevel,
8318         .set_msglevel           = tg3_set_msglevel,
8319         .nway_reset             = tg3_nway_reset,
8320         .get_link               = ethtool_op_get_link,
8321         .get_eeprom_len         = tg3_get_eeprom_len,
8322         .get_eeprom             = tg3_get_eeprom,
8323         .set_eeprom             = tg3_set_eeprom,
8324         .get_ringparam          = tg3_get_ringparam,
8325         .set_ringparam          = tg3_set_ringparam,
8326         .get_pauseparam         = tg3_get_pauseparam,
8327         .set_pauseparam         = tg3_set_pauseparam,
8328         .get_rx_csum            = tg3_get_rx_csum,
8329         .set_rx_csum            = tg3_set_rx_csum,
8330         .get_tx_csum            = ethtool_op_get_tx_csum,
8331         .set_tx_csum            = tg3_set_tx_csum,
8332         .get_sg                 = ethtool_op_get_sg,
8333         .set_sg                 = ethtool_op_set_sg,
8334 #if TG3_TSO_SUPPORT != 0
8335         .get_tso                = ethtool_op_get_tso,
8336         .set_tso                = tg3_set_tso,
8337 #endif
8338         .self_test_count        = tg3_get_test_count,
8339         .self_test              = tg3_self_test,
8340         .get_strings            = tg3_get_strings,
8341         .phys_id                = tg3_phys_id,
8342         .get_stats_count        = tg3_get_stats_count,
8343         .get_ethtool_stats      = tg3_get_ethtool_stats,
8344         .get_coalesce           = tg3_get_coalesce,
8345         .set_coalesce           = tg3_set_coalesce,
8346         .get_perm_addr          = ethtool_op_get_perm_addr,
8347 };
8348
8349 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8350 {
8351         u32 cursize, val;
8352
8353         tp->nvram_size = EEPROM_CHIP_SIZE;
8354
8355         if (tg3_nvram_read(tp, 0, &val) != 0)
8356                 return;
8357
8358         if (swab32(val) != TG3_EEPROM_MAGIC)
8359                 return;
8360
8361         /*
8362          * Size the chip by reading offsets at increasing powers of two.
8363          * When we encounter our validation signature, we know the addressing
8364          * has wrapped around, and thus have our chip size.
8365          */
8366         cursize = 0x800;
8367
8368         while (cursize < tp->nvram_size) {
8369                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8370                         return;
8371
8372                 if (swab32(val) == TG3_EEPROM_MAGIC)
8373                         break;
8374
8375                 cursize <<= 1;
8376         }
8377
8378         tp->nvram_size = cursize;
8379 }
8380
8381 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8382 {
8383         u32 val;
8384
8385         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8386                 if (val != 0) {
8387                         tp->nvram_size = (val >> 16) * 1024;
8388                         return;
8389                 }
8390         }
8391         tp->nvram_size = 0x20000;
8392 }
8393
8394 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8395 {
8396         u32 nvcfg1;
8397
8398         nvcfg1 = tr32(NVRAM_CFG1);
8399         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8400                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8401         }
8402         else {
8403                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8404                 tw32(NVRAM_CFG1, nvcfg1);
8405         }
8406
8407         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8408             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8409                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8410                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8411                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8412                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8413                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8414                                 break;
8415                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8416                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8417                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8418                                 break;
8419                         case FLASH_VENDOR_ATMEL_EEPROM:
8420                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8421                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8422                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8423                                 break;
8424                         case FLASH_VENDOR_ST:
8425                                 tp->nvram_jedecnum = JEDEC_ST;
8426                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8427                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8428                                 break;
8429                         case FLASH_VENDOR_SAIFUN:
8430                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8431                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8432                                 break;
8433                         case FLASH_VENDOR_SST_SMALL:
8434                         case FLASH_VENDOR_SST_LARGE:
8435                                 tp->nvram_jedecnum = JEDEC_SST;
8436                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8437                                 break;
8438                 }
8439         }
8440         else {
8441                 tp->nvram_jedecnum = JEDEC_ATMEL;
8442                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8443                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8444         }
8445 }
8446
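     /* Decode NVRAM_CFG1 on 5752 parts: note TPM protection, identify the
      * flash/EEPROM vendor, and derive the programming page size.
      */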
8447 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8448 {
8449         u32 nvcfg1;
8450
8451         nvcfg1 = tr32(NVRAM_CFG1);
8452
8453         /* NVRAM protection for TPM */
8454         if (nvcfg1 & (1 << 27))
8455                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8456
8457         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8458                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8459                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8460                         tp->nvram_jedecnum = JEDEC_ATMEL;
8461                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8462                         break;
8463                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8464                         tp->nvram_jedecnum = JEDEC_ATMEL;
8465                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8466                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8467                         break;
8468                 case FLASH_5752VENDOR_ST_M45PE10:
8469                 case FLASH_5752VENDOR_ST_M45PE20:
8470                 case FLASH_5752VENDOR_ST_M45PE40:
8471                         tp->nvram_jedecnum = JEDEC_ST;
8472                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8473                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8474                         break;
8475         }
8476
8477         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8478                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8479                         case FLASH_5752PAGE_SIZE_256:
8480                                 tp->nvram_pagesize = 256;
8481                                 break;
8482                         case FLASH_5752PAGE_SIZE_512:
8483                                 tp->nvram_pagesize = 512;
8484                                 break;
8485                         case FLASH_5752PAGE_SIZE_1K:
8486                                 tp->nvram_pagesize = 1024;
8487                                 break;
8488                         case FLASH_5752PAGE_SIZE_2K:
8489                                 tp->nvram_pagesize = 2048;
8490                                 break;
8491                         case FLASH_5752PAGE_SIZE_4K:
8492                                 tp->nvram_pagesize = 4096;
8493                                 break;
8494                         case FLASH_5752PAGE_SIZE_264:
8495                                 tp->nvram_pagesize = 264;
8496                                 break;
8497                 }
8498         }
8499         else {
8500                 /* For eeprom, set pagesize to maximum eeprom size */
8501                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8502
8503                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8504                 tw32(NVRAM_CFG1, nvcfg1);
8505         }
8506 }
8507
8508 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8509 static void __devinit tg3_nvram_init(struct tg3 *tp)
8510 {
8511         int j;
8512
8513         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8514                 return;
8515
8516         tw32_f(GRC_EEPROM_ADDR,
8517              (EEPROM_ADDR_FSM_RESET |
8518               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8519                EEPROM_ADDR_CLKPERD_SHIFT)));
8520
8521         /* XXX schedule_timeout() ... */
8522         for (j = 0; j < 100; j++)
8523                 udelay(10);
8524
8525         /* Enable seeprom accesses. */
8526         tw32_f(GRC_LOCAL_CTRL,
8527              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8528         udelay(100);
8529
8530         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8531             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8532                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8533
8534                 tg3_enable_nvram_access(tp);
8535
8536                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8537                         tg3_get_5752_nvram_info(tp);
8538                 else
8539                         tg3_get_nvram_info(tp);
8540
8541                 tg3_get_nvram_size(tp);
8542
8543                 tg3_disable_nvram_access(tp);
8544
8545         } else {
8546                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8547
8548                 tg3_get_eeprom_size(tp);
8549         }
8550 }
8551
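     /* Legacy SEEPROM access path: program the target offset into
      * GRC_EEPROM_ADDR along with the READ and START bits, poll for
      * COMPLETE, then fetch the result from GRC_EEPROM_DATA.
      */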
8552 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8553                                         u32 offset, u32 *val)
8554 {
8555         u32 tmp;
8556         int i;
8557
8558         if (offset > EEPROM_ADDR_ADDR_MASK ||
8559             (offset % 4) != 0)
8560                 return -EINVAL;
8561
8562         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8563                                         EEPROM_ADDR_DEVID_MASK |
8564                                         EEPROM_ADDR_READ);
8565         tw32(GRC_EEPROM_ADDR,
8566              tmp |
8567              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8568              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8569               EEPROM_ADDR_ADDR_MASK) |
8570              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8571
8572         for (i = 0; i < 10000; i++) {
8573                 tmp = tr32(GRC_EEPROM_ADDR);
8574
8575                 if (tmp & EEPROM_ADDR_COMPLETE)
8576                         break;
8577                 udelay(100);
8578         }
8579         if (!(tmp & EEPROM_ADDR_COMPLETE))
8580                 return -EBUSY;
8581
8582         *val = tr32(GRC_EEPROM_DATA);
8583         return 0;
8584 }
8585
8586 #define NVRAM_CMD_TIMEOUT 10000
8587
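     /* Kick off an NVRAM command and poll for NVRAM_CMD_DONE, giving up
      * after NVRAM_CMD_TIMEOUT iterations of 10 usec each.
      */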
8588 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8589 {
8590         int i;
8591
8592         tw32(NVRAM_CMD, nvram_cmd);
8593         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8594                 udelay(10);
8595                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8596                         udelay(10);
8597                         break;
8598                 }
8599         }
8600         if (i == NVRAM_CMD_TIMEOUT) {
8601                 return -EBUSY;
8602         }
8603         return 0;
8604 }
8605
8606 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8607 {
8608         int ret;
8609
8610         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8611                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8612                 return -EINVAL;
8613         }
8614
8615         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8616                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8617
8618         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8619                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8620                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8621
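                     /* Buffered Atmel flash uses a paged address format:
                      * the page index sits above the in-page byte offset
                      * (ATMEL_AT45DB0X1B_PAGE_POS), so split the linear
                      * offset accordingly; e.g. with 264-byte pages, a
                      * linear offset of 1000 maps to page 3, byte 208.
                      */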
8622                 offset = ((offset / tp->nvram_pagesize) <<
8623                           ATMEL_AT45DB0X1B_PAGE_POS) +
8624                         (offset % tp->nvram_pagesize);
8625         }
8626
8627         if (offset > NVRAM_ADDR_MSK)
8628                 return -EINVAL;
8629
8630         tg3_nvram_lock(tp);
8631
8632         tg3_enable_nvram_access(tp);
8633
8634         tw32(NVRAM_ADDR, offset);
8635         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8636                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8637
8638         if (ret == 0)
8639                 *val = swab32(tr32(NVRAM_RDDATA));
8640
8641         tg3_nvram_unlock(tp);
8642
8643         tg3_disable_nvram_access(tp);
8644
8645         return ret;
8646 }
8647
8648 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8649                                     u32 offset, u32 len, u8 *buf)
8650 {
8651         int i, j, rc = 0;
8652         u32 val;
8653
8654         for (i = 0; i < len; i += 4) {
8655                 u32 addr, data;
8656
8657                 addr = offset + i;
8658
8659                 memcpy(&data, buf + i, 4);
8660
8661                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8662
8663                 val = tr32(GRC_EEPROM_ADDR);
8664                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8665
8666                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8667                         EEPROM_ADDR_READ);
8668                 tw32(GRC_EEPROM_ADDR, val |
8669                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8670                         (addr & EEPROM_ADDR_ADDR_MASK) |
8671                         EEPROM_ADDR_START |
8672                         EEPROM_ADDR_WRITE);
8673
8674                 for (j = 0; j < 10000; j++) {
8675                         val = tr32(GRC_EEPROM_ADDR);
8676
8677                         if (val & EEPROM_ADDR_COMPLETE)
8678                                 break;
8679                         udelay(100);
8680                 }
8681                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8682                         rc = -EBUSY;
8683                         break;
8684                 }
8685         }
8686
8687         return rc;
8688 }
8689
8690 /* offset and length are dword aligned */
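     /* Unbuffered flash must be programmed a full page at a time, so do a
      * read-modify-write: read the page into a bounce buffer, merge in the
      * new data, issue a write-enable, erase the page, issue another
      * write-enable, then rewrite the page dword by dword with FIRST/LAST
      * framing on the page boundaries.
      */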
8691 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8692                 u8 *buf)
8693 {
8694         int ret = 0;
8695         u32 pagesize = tp->nvram_pagesize;
8696         u32 pagemask = pagesize - 1;
8697         u32 nvram_cmd;
8698         u8 *tmp;
8699
8700         tmp = kmalloc(pagesize, GFP_KERNEL);
8701         if (tmp == NULL)
8702                 return -ENOMEM;
8703
8704         while (len) {
8705                 int j;
8706                 u32 phy_addr, page_off, size;
8707
8708                 phy_addr = offset & ~pagemask;
8709
8710                 for (j = 0; j < pagesize; j += 4) {
8711                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8712                                                 (u32 *) (tmp + j))))
8713                                 break;
8714                 }
8715                 if (ret)
8716                         break;
8717
8718                 page_off = offset & pagemask;
8719                 size = pagesize;
8720                 if (len < size)
8721                         size = len;
8722
8723                 len -= size;
8724
8725                 memcpy(tmp + page_off, buf, size);
8726
8727                 offset = offset + (pagesize - page_off);
8728
8729                 tg3_enable_nvram_access(tp);
8730
8731                 /*
8732                  * Before we can erase the flash page, we need
8733                  * to issue a special "write enable" command.
8734                  */
8735                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8736
8737                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8738                         break;
8739
8740                 /* Erase the target page */
8741                 tw32(NVRAM_ADDR, phy_addr);
8742
8743                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8744                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8745
8746                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8747                         break;
8748
8749                 /* Issue another write enable to start the write. */
8750                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8751
8752                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8753                         break;
8754
8755                 for (j = 0; j < pagesize; j += 4) {
8756                         u32 data;
8757
8758                         data = *((u32 *) (tmp + j));
8759                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8760
8761                         tw32(NVRAM_ADDR, phy_addr + j);
8762
8763                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8764                                 NVRAM_CMD_WR;
8765
8766                         if (j == 0)
8767                                 nvram_cmd |= NVRAM_CMD_FIRST;
8768                         else if (j == (pagesize - 4))
8769                                 nvram_cmd |= NVRAM_CMD_LAST;
8770
8771                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8772                                 break;
8773                 }
8774                 if (ret)
8775                         break;
8776         }
8777
8778         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8779         tg3_nvram_exec_cmd(tp, nvram_cmd);
8780
8781         kfree(tmp);
8782
8783         return ret;
8784 }
8785
8786 /* offset and length are dword aligned */
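     /* Buffered flash and EEPROM parts accept word-at-a-time programming:
      * write each dword directly, marking page boundaries (or every word,
      * for EEPROM) with the FIRST and LAST command flags.
      */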
8787 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8788                 u8 *buf)
8789 {
8790         int i, ret = 0;
8791
8792         for (i = 0; i < len; i += 4, offset += 4) {
8793                 u32 data, page_off, phy_addr, nvram_cmd;
8794
8795                 memcpy(&data, buf + i, 4);
8796                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8797
8798                 page_off = offset % tp->nvram_pagesize;
8799
8800                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8801                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8802
8803                         phy_addr = ((offset / tp->nvram_pagesize) <<
8804                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8805                 }
8806                 else {
8807                         phy_addr = offset;
8808                 }
8809
8810                 tw32(NVRAM_ADDR, phy_addr);
8811
8812                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8813
8814                 if ((page_off == 0) || (i == 0))
8815                         nvram_cmd |= NVRAM_CMD_FIRST;
8816                 else if (page_off == (tp->nvram_pagesize - 4))
8817                         nvram_cmd |= NVRAM_CMD_LAST;
8818
8819                 if (i == (len - 4))
8820                         nvram_cmd |= NVRAM_CMD_LAST;
8821
8822                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8823                     (tp->nvram_jedecnum == JEDEC_ST) &&
8824                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8825
8826                         if ((ret = tg3_nvram_exec_cmd(tp,
8827                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8828                                 NVRAM_CMD_DONE)))
8829
8830                                 break;
8831                 }
8832                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8833                         /* We always do complete word writes to eeprom. */
8834                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8835                 }
8836
8837                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8838                         break;
8839         }
8840         return ret;
8841 }
8842
8843 /* offset and length are dword aligned */
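     /* Top-level NVRAM write: temporarily drop the EEPROM write-protect
      * GPIO if needed, enable NVRAM writes in GRC_MODE, and dispatch to
      * the EEPROM, buffered-flash or unbuffered-flash helper as
      * appropriate.
      */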
8844 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8845 {
8846         int ret;
8847
8848         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8849                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8850                 return -EINVAL;
8851         }
8852
8853         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8854                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8855                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8856                 udelay(40);
8857         }
8858
8859         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8860                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8861         }
8862         else {
8863                 u32 grc_mode;
8864
8865                 tg3_nvram_lock(tp);
8866
8867                 tg3_enable_nvram_access(tp);
8868                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8869                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8870                         tw32(NVRAM_WRITE1, 0x406);
8871
8872                 grc_mode = tr32(GRC_MODE);
8873                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8874
8875                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8876                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8877
8878                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8879                                 buf);
8880                 }
8881                 else {
8882                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8883                                 buf);
8884                 }
8885
8886                 grc_mode = tr32(GRC_MODE);
8887                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8888
8889                 tg3_disable_nvram_access(tp);
8890                 tg3_nvram_unlock(tp);
8891         }
8892
8893         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8894                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8895                 udelay(40);
8896         }
8897
8898         return ret;
8899 }
8900
8901 struct subsys_tbl_ent {
8902         u16 subsys_vendor, subsys_devid;
8903         u32 phy_id;
8904 };
8905
8906 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8907         /* Broadcom boards. */
8908         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8909         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8910         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8911         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8912         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8913         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8914         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8915         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8916         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8917         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8918         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8919
8920         /* 3com boards. */
8921         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8922         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8923         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8924         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8925         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8926
8927         /* DELL boards. */
8928         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8929         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8930         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8931         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8932
8933         /* Compaq boards. */
8934         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8935         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8936         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8937         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8938         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8939
8940         /* IBM boards. */
8941         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8942 };
8943
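     /* Match the PCI subsystem vendor/device IDs against the table above
      * to recover the PHY type on boards whose NVRAM carries no usable
      * PHY ID.
      */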
8944 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8945 {
8946         int i;
8947
8948         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8949                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8950                      tp->pdev->subsystem_vendor) &&
8951                     (subsys_id_to_phy_id[i].subsys_devid ==
8952                      tp->pdev->subsystem_device))
8953                         return &subsys_id_to_phy_id[i];
8954         }
8955         return NULL;
8956 }
8957
8958 /* Since this function may be called in D3-hot power state during
8959  * tg3_init_one(), only config cycles are allowed.
8960  */
8961 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8962 {
8963         u32 val;
8964
8965         /* Make sure register accesses (indirect or otherwise)
8966          * will function correctly.
8967          */
8968         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8969                                tp->misc_host_ctrl);
8970
8971         tp->phy_id = PHY_ID_INVALID;
8972         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8973
8974         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8975         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8976                 u32 nic_cfg, led_cfg;
8977                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8978                 int eeprom_phy_serdes = 0;
8979
8980                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8981                 tp->nic_sram_data_cfg = nic_cfg;
8982
8983                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8984                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8985                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8986                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8987                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8988                     (ver > 0) && (ver < 0x100))
8989                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8990
8991                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8992                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8993                         eeprom_phy_serdes = 1;
8994
8995                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
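                     /* Pack the two SRAM PHY ID words into the driver's
                      * internal PHY_ID layout, the same format that
                      * tg3_phy_probe() builds from MII_PHYSID1/2.
                      */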
8996                 if (nic_phy_id != 0) {
8997                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8998                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8999
9000                         eeprom_phy_id  = (id1 >> 16) << 10;
9001                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9002                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9003                 } else
9004                         eeprom_phy_id = 0;
9005
9006                 tp->phy_id = eeprom_phy_id;
9007                 if (eeprom_phy_serdes) {
9008                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9009                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9010                         else
9011                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9012                 }
9013
9014                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9015                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9016                                     SHASTA_EXT_LED_MODE_MASK);
9017                 else
9018                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9019
9020                 switch (led_cfg) {
9021                 default:
9022                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9023                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9024                         break;
9025
9026                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9027                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9028                         break;
9029
9030                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9031                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9032
9033                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9034                          * read on some older 5700/5701 bootcode.
9035                          */
9036                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9037                             ASIC_REV_5700 ||
9038                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9039                             ASIC_REV_5701)
9040                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9041
9042                         break;
9043
9044                 case SHASTA_EXT_LED_SHARED:
9045                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9046                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9047                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9048                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9049                                                  LED_CTRL_MODE_PHY_2);
9050                         break;
9051
9052                 case SHASTA_EXT_LED_MAC:
9053                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9054                         break;
9055
9056                 case SHASTA_EXT_LED_COMBO:
9057                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9058                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9059                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9060                                                  LED_CTRL_MODE_PHY_2);
9061                         break;
9062
9063                 }
9064
9065                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9066                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9067                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9068                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9069
9070                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9071                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9072                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9073                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9074
9075                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9076                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9077                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9078                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9079                 }
9080                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9081                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9082
9083                 if (cfg2 & (1 << 17))
9084                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9085
9086                 /* SerDes signal pre-emphasis in register 0x590 is set by
9087                  * the bootcode if bit 18 is set. */
9088                 if (cfg2 & (1 << 18))
9089                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9090         }
9091 }
9092
9093 static int __devinit tg3_phy_probe(struct tg3 *tp)
9094 {
9095         u32 hw_phy_id_1, hw_phy_id_2;
9096         u32 hw_phy_id, hw_phy_id_masked;
9097         int err;
9098
9099         /* Reading the PHY ID register can conflict with ASF
9100          * firmware access to the PHY hardware.
9101          */
9102         err = 0;
9103         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9104                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9105         } else {
9106                 /* Now read the physical PHY_ID from the chip and verify
9107                  * that it is sane.  If it doesn't look good, fall back to
9108                  * the PHY_ID found in the EEPROM area, and failing that,
9109                  * the hard-coded subsystem-ID table.
9110                  */
9111                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9112                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9113
9114                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9115                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9116                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9117
9118                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9119         }
9120
9121         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9122                 tp->phy_id = hw_phy_id;
9123                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9124                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9125                 else
9126                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9127         } else {
9128                 if (tp->phy_id != PHY_ID_INVALID) {
9129                         /* Do nothing, phy ID already set up in
9130                          * tg3_get_eeprom_hw_cfg().
9131                          */
9132                 } else {
9133                         struct subsys_tbl_ent *p;
9134
9135                         /* No eeprom signature?  Try the hardcoded
9136                          * subsys device table.
9137                          */
9138                         p = lookup_by_subsys(tp);
9139                         if (!p)
9140                                 return -ENODEV;
9141
9142                         tp->phy_id = p->phy_id;
9143                         if (!tp->phy_id ||
9144                             tp->phy_id == PHY_ID_BCM8002)
9145                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9146                 }
9147         }
9148
9149         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9150             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9151                 u32 bmsr, adv_reg, tg3_ctrl;
9152
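                     /* MII_BMSR latches link-down events, so read it twice: the
                      * first read clears any stale latched status and the second
                      * reflects the current link state.  If the link is already
                      * up, skip the PHY reset and re-advertisement below.
                      */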
9153                 tg3_readphy(tp, MII_BMSR, &bmsr);
9154                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9155                     (bmsr & BMSR_LSTATUS))
9156                         goto skip_phy_reset;
9157                     
9158                 err = tg3_phy_reset(tp);
9159                 if (err)
9160                         return err;
9161
9162                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9163                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9164                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9165                 tg3_ctrl = 0;
9166                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9167                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9168                                     MII_TG3_CTRL_ADV_1000_FULL);
9169                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9170                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9171                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9172                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9173                 }
9174
9175                 if (!tg3_copper_is_advertising_all(tp)) {
9176                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9177
9178                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9179                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9180
9181                         tg3_writephy(tp, MII_BMCR,
9182                                      BMCR_ANENABLE | BMCR_ANRESTART);
9183                 }
9184                 tg3_phy_set_wirespeed(tp);
9185
9186                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9187                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9188                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9189         }
9190
9191 skip_phy_reset:
9192         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9193                 err = tg3_init_5401phy_dsp(tp);
9194                 if (err)
9195                         return err;
9196         }
9197
9198         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9199                 err = tg3_init_5401phy_dsp(tp);
9200         }
9201
9202         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9203                 tp->link_config.advertising =
9204                         (ADVERTISED_1000baseT_Half |
9205                          ADVERTISED_1000baseT_Full |
9206                          ADVERTISED_Autoneg |
9207                          ADVERTISED_FIBRE);
9208         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9209                 tp->link_config.advertising &=
9210                         ~(ADVERTISED_1000baseT_Half |
9211                           ADVERTISED_1000baseT_Full);
9212
9213         return err;
9214 }
9215
9216 static void __devinit tg3_read_partno(struct tg3 *tp)
9217 {
9218         unsigned char vpd_data[256];
9219         int i;
9220
9221         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9222                 /* Sun decided not to put the necessary bits in the
9223                  * NVRAM of their onboard tg3 parts :(
9224                  */
9225                 strcpy(tp->board_part_number, "Sun 570X");
9226                 return;
9227         }
9228
9229         for (i = 0; i < 256; i += 4) {
9230                 u32 tmp;
9231
9232                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9233                         goto out_not_found;
9234
9235                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9236                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9237                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9238                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9239         }
9240
9241         /* Now parse and find the part number. */
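             /* The VPD area is a chain of PCI large-resource descriptors:
              * tag 0x82 (identifier string) and tag 0x91 (read/write data)
              * are skipped, while tag 0x90 (read-only data) holds the
              * keyword list that includes "PN", the board part number.
              */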
9242         for (i = 0; i < 256; ) {
9243                 unsigned char val = vpd_data[i];
9244                 int block_end;
9245
9246                 if (val == 0x82 || val == 0x91) {
9247                         i = (i + 3 +
9248                              (vpd_data[i + 1] +
9249                               (vpd_data[i + 2] << 8)));
9250                         continue;
9251                 }
9252
9253                 if (val != 0x90)
9254                         goto out_not_found;
9255
9256                 block_end = (i + 3 +
9257                              (vpd_data[i + 1] +
9258                               (vpd_data[i + 2] << 8)));
9259                 i += 3;
9260                 while (i < block_end) {
9261                         if (vpd_data[i + 0] == 'P' &&
9262                             vpd_data[i + 1] == 'N') {
9263                                 int partno_len = vpd_data[i + 2];
9264
9265                                 if (partno_len > 24)
9266                                         goto out_not_found;
9267
9268                                 memcpy(tp->board_part_number,
9269                                        &vpd_data[i + 3],
9270                                        partno_len);
9271
9272                                 /* Success. */
9273                                 return;
9274                         }
                             i += 3 + vpd_data[i + 2];   /* advance to the next keyword */
9275                 }
9276
9277                 /* Part number not found. */
9278                 goto out_not_found;
9279         }
9280
9281 out_not_found:
9282         strcpy(tp->board_part_number, "none");
9283 }
9284
9285 #ifdef CONFIG_SPARC64
9286 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9287 {
9288         struct pci_dev *pdev = tp->pdev;
9289         struct pcidev_cookie *pcp = pdev->sysdata;
9290
9291         if (pcp != NULL) {
9292                 int node = pcp->prom_node;
9293                 u32 venid;
9294                 int err;
9295
9296                 err = prom_getproperty(node, "subsystem-vendor-id",
9297                                        (char *) &venid, sizeof(venid));
9298                 if (err == 0 || err == -1)
9299                         return 0;
9300                 if (venid == PCI_VENDOR_ID_SUN)
9301                         return 1;
9302         }
9303         return 0;
9304 }
9305 #endif
9306
9307 static int __devinit tg3_get_invariants(struct tg3 *tp)
9308 {
9309         static struct pci_device_id write_reorder_chipsets[] = {
9310                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9311                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9312                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9313                              PCI_DEVICE_ID_VIA_8385_0) },
9314                 { },
9315         };
9316         u32 misc_ctrl_reg;
9317         u32 cacheline_sz_reg;
9318         u32 pci_state_reg, grc_misc_cfg;
9319         u32 val;
9320         u16 pci_cmd;
9321         int err;
9322
9323 #ifdef CONFIG_SPARC64
9324         if (tg3_is_sun_570X(tp))
9325                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9326 #endif
9327
9328         /* Force memory write invalidate off.  If we leave it on,
9329          * then on 5700_BX chips we have to enable a workaround.
9330          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9331          * to match the cacheline size.  The Broadcom driver has this
9332          * workaround but turns MWI off all the time, so it never uses
9333          * it.  This seems to suggest that the workaround is insufficient.
9334          */
9335         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9336         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9337         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9338
9339         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9340          * has the register indirect write enable bit set before
9341          * we try to access any of the MMIO registers.  It is also
9342          * critical that the PCI-X hw workaround situation is decided
9343          * before that as well.
9344          */
9345         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9346                               &misc_ctrl_reg);
9347
9348         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9349                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9350
9351         /* Wrong chip ID in 5752 A0. This code can be removed later
9352          * as A0 is not in production.
9353          */
9354         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9355                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9356
9357         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9358          * we need to disable memory and use config. cycles
9359          * only to access all registers. The 5702/03 chips
9360          * can mistakenly decode the special cycles from the
9361          * ICH chipsets as memory write cycles, causing corruption
9362          * of register and memory space. Only certain ICH bridges
9363          * will drive special cycles with non-zero data during the
9364          * address phase which can fall within the 5703's address
9365          * range. This is not an ICH bug as the PCI spec allows
9366          * non-zero address during special cycles. However, only
9367          * these ICH bridges are known to drive non-zero addresses
9368          * during special cycles.
9369          *
9370          * Since special cycles do not cross PCI bridges, we only
9371          * enable this workaround if the 5703 is on the secondary
9372          * bus of these ICH bridges.
9373          */
9374         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9375             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9376                 static struct tg3_dev_id {
9377                         u32     vendor;
9378                         u32     device;
9379                         u32     rev;
9380                 } ich_chipsets[] = {
9381                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9382                           PCI_ANY_ID },
9383                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9384                           PCI_ANY_ID },
9385                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9386                           0xa },
9387                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9388                           PCI_ANY_ID },
9389                         { },
9390                 };
9391                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9392                 struct pci_dev *bridge = NULL;
9393
9394                 while (pci_id->vendor != 0) {
9395                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9396                                                 bridge);
9397                         if (!bridge) {
9398                                 pci_id++;
9399                                 continue;
9400                         }
9401                         if (pci_id->rev != PCI_ANY_ID) {
9402                                 u8 rev;
9403
9404                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9405                                                      &rev);
9406                                 if (rev > pci_id->rev)
9407                                         continue;
9408                         }
9409                         if (bridge->subordinate &&
9410                             (bridge->subordinate->number ==
9411                              tp->pdev->bus->number)) {
9412
9413                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9414                                 pci_dev_put(bridge);
9415                                 break;
9416                         }
9417                 }
9418         }
9419
9420         /* Find msi capability. */
9421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9422             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9423                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9424                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9425         }
9426
9427         /* Initialize misc host control in PCI block. */
9428         tp->misc_host_ctrl |= (misc_ctrl_reg &
9429                                MISC_HOST_CTRL_CHIPREV);
9430         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9431                                tp->misc_host_ctrl);
9432
9433         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9434                               &cacheline_sz_reg);
9435
9436         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9437         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9438         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9439         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9440
9441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9443             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9444                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9445
9446         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9447             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9448                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9449
9450         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9451                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9452
9453         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9454             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9455             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9456                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9457
9458         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9459                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9460
9461         /* If we have an AMD 762 or VIA K8T800 chipset, write
9462          * reordering to the mailbox registers done by the host
9463          * controller can cause major troubles.  We read back from
9464          * every mailbox register write to force the writes to be
9465          * posted to the chip in order.
9466          */
9467         if (pci_dev_present(write_reorder_chipsets) &&
9468             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9469                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9470
9471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9472             tp->pci_lat_timer < 64) {
9473                 tp->pci_lat_timer = 64;
9474
9475                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9476                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9477                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9478                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9479
9480                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9481                                        cacheline_sz_reg);
9482         }
9483
9484         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9485                               &pci_state_reg);
9486
9487         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9488                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9489
9490                 /* If this is a 5700 BX chipset, and we are in PCI-X
9491                  * mode, enable register write workaround.
9492                  *
9493                  * The workaround is to use indirect register accesses
9494                  * for all chip writes not to mailbox registers.
9495                  */
9496                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9497                         u32 pm_reg;
9498                         u16 pci_cmd;
9499
9500                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9501
9502                         /* The chip can have its power management PCI config
9503                          * space registers clobbered due to this bug.
9504                          * So explicitly force the chip into D0 here.
9505                          */
9506                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9507                                               &pm_reg);
9508                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9509                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9510                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9511                                                pm_reg);
9512
9513                         /* Also, force SERR#/PERR# in PCI command. */
9514                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9515                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9516                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9517                 }
9518         }
9519
9520         /* 5700 BX chips need to have their TX producer index mailboxes
9521          * written twice to workaround a bug.
9522          */
9523         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9524                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9525
9526         /* Back to back register writes can cause problems on this chip,
9527          * the workaround is to read back all reg writes except those to
9528          * mailbox regs.  See tg3_write_indirect_reg32().
9529          *
9530          * PCI Express 5750_A0 rev chips need this workaround too.
9531          */
9532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9533             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9534              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9535                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9536
9537         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9538                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9539         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9540                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9541
9542         /* Chip-specific fixup from Broadcom driver */
9543         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9544             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9545                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9546                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9547         }
9548
9549         /* Default fast path register access methods */
9550         tp->read32 = tg3_read32;
9551         tp->write32 = tg3_write32;
9552         tp->read32_mbox = tg3_read32;
9553         tp->write32_mbox = tg3_write32;
9554         tp->write32_tx_mbox = tg3_write32;
9555         tp->write32_rx_mbox = tg3_write32;
9556
9557         /* Various workaround register access methods */
9558         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9559                 tp->write32 = tg3_write_indirect_reg32;
9560         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9561                 tp->write32 = tg3_write_flush_reg32;
9562
9563         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9564             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9565                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9566                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9567                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9568         }
9569
9570         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9571                 tp->read32 = tg3_read_indirect_reg32;
9572                 tp->write32 = tg3_write_indirect_reg32;
9573                 tp->read32_mbox = tg3_read_indirect_mbox;
9574                 tp->write32_mbox = tg3_write_indirect_mbox;
9575                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9576                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9577
9578                 iounmap(tp->regs);
9579                 tp->regs = NULL;
9580
9581                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9582                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9583                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9584         }
9585
9586         /* Get eeprom hw config before calling tg3_set_power_state().
9587          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9588          * determined before calling tg3_set_power_state() so that
9589          * we know whether or not to switch out of Vaux power.
9590          * When the flag is set, it means that GPIO1 is used for eeprom
9591          * write protect and also implies that it is a LOM where GPIOs
9592          * are not used to switch power.
9593          */ 
9594         tg3_get_eeprom_hw_cfg(tp);
9595
9596         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9597          * GPIO1 driven high will bring 5700's external PHY out of reset.
9598          * It is also used as eeprom write protect on LOMs.
9599          */
9600         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9601         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9602             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9603                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9604                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9605         /* Unused GPIO3 must be driven as output on 5752 because there
9606          * are no pull-up resistors on unused GPIO pins.
9607          */
9608         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9609                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9610
9611         /* Force the chip into D0. */
9612         err = tg3_set_power_state(tp, 0);
9613         if (err) {
9614                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9615                        pci_name(tp->pdev));
9616                 return err;
9617         }
9618
9619         /* 5700 B0 chips do not support checksumming correctly due
9620          * to hardware bugs.
9621          */
9622         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9623                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9624
9625         /* Pseudo-header checksum is done by hardware logic and not
9626          * the offload processors, so make the chip do the pseudo-
9627          * header checksums on receive.  For transmit it is more
9628          * convenient to do the pseudo-header checksum in software
9629          * as Linux does that on transmit for us in all cases.
9630          */
9631         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9632         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9633
9634         /* Derive initial jumbo mode from MTU assigned in
9635          * ether_setup() via the alloc_etherdev() call
9636          */
9637         if (tp->dev->mtu > ETH_DATA_LEN &&
9638             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9639                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9640
9641         /* Determine WakeOnLan speed to use. */
9642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9643             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9644             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9645             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9646                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9647         } else {
9648                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9649         }
9650
9651         /* A few boards don't want Ethernet@WireSpeed phy feature */
9652         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9653             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9654              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9655              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9656             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9657                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9658
9659         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9660             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9661                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9662         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9663                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9664
9665         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9666                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9667
9668         tp->coalesce_mode = 0;
9669         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9670             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9671                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9672
9673         /* Initialize MAC MI mode, polling disabled. */
9674         tw32_f(MAC_MI_MODE, tp->mi_mode);
9675         udelay(80);
9676
9677         /* Initialize data/descriptor byte/word swapping. */
9678         val = tr32(GRC_MODE);
9679         val &= GRC_MODE_HOST_STACKUP;
9680         tw32(GRC_MODE, val | tp->grc_mode);
9681
9682         tg3_switch_clocks(tp);
9683
9684         /* Clear this out for sanity. */
9685         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9686
9687         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9688                               &pci_state_reg);
9689         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9690             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9691                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9692
9693                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9694                     chiprevid == CHIPREV_ID_5701_B0 ||
9695                     chiprevid == CHIPREV_ID_5701_B2 ||
9696                     chiprevid == CHIPREV_ID_5701_B5) {
9697                         void __iomem *sram_base;
9698
9699                         /* Write some dummy words into the SRAM status block
9700                          * area, see if it reads back correctly.  If the return
9701                          * value is bad, force enable the PCIX workaround.
9702                          */
9703                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9704
9705                         writel(0x00000000, sram_base);
9706                         writel(0x00000000, sram_base + 4);
9707                         writel(0xffffffff, sram_base + 4);
9708                         if (readl(sram_base) != 0x00000000)
9709                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9710                 }
9711         }
9712
9713         udelay(50);
9714         tg3_nvram_init(tp);
9715
9716         grc_misc_cfg = tr32(GRC_MISC_CFG);
9717         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9718
9719         /* Broadcom's driver says that CIOBE multisplit has a bug */
9720 #if 0
9721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9722             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9723                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9724                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9725         }
9726 #endif
9727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9728             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9729              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9730                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9731
9732         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9733             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9734                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9735         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9736                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9737                                       HOSTCC_MODE_CLRTICK_TXBD);
9738
9739                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9740                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9741                                        tp->misc_host_ctrl);
9742         }
9743
9744         /* these are limited to 10/100 only */
9745         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9746              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9747             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9748              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9749              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9750               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9751               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9752             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9753              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9754               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9755                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9756
9757         err = tg3_phy_probe(tp);
9758         if (err) {
9759                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9760                        pci_name(tp->pdev), err);
9761                 /* ... but do not return immediately ... */
9762         }
9763
9764         tg3_read_partno(tp);
9765
9766         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9767                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9768         } else {
9769                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9770                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9771                 else
9772                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9773         }
9774
9775         /* 5700 {AX,BX} chips have a broken status block link
9776          * change bit implementation, so we must use the
9777          * status register in those cases.
9778          */
9779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9780                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9781         else
9782                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9783
9784         /* The led_ctrl is set during tg3_phy_probe, here we might
9785          * have to force the link status polling mechanism based
9786          * upon subsystem IDs.
9787          */
9788         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9789             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9790                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9791                                   TG3_FLAG_USE_LINKCHG_REG);
9792         }
9793
9794         /* For all SERDES we poll the MAC status register. */
9795         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9796                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9797         else
9798                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9799
9800         /* It seems all chips can get confused if TX buffers
9801          * straddle the 4GB address boundary in some cases.
9802          */
9803         tp->dev->hard_start_xmit = tg3_start_xmit;
9804
9805         tp->rx_offset = 2;
9806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9807             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9808                 tp->rx_offset = 0;
9809
9810         /* By default, disable wake-on-lan.  User can change this
9811          * using ETHTOOL_SWOL.
9812          */
9813         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9814
9815         return err;
9816 }
9817
9818 #ifdef CONFIG_SPARC64
9819 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9820 {
9821         struct net_device *dev = tp->dev;
9822         struct pci_dev *pdev = tp->pdev;
9823         struct pcidev_cookie *pcp = pdev->sysdata;
9824
9825         if (pcp != NULL) {
9826                 int node = pcp->prom_node;
9827
9828                 if (prom_getproplen(node, "local-mac-address") == 6) {
9829                         prom_getproperty(node, "local-mac-address",
9830                                          dev->dev_addr, 6);
9831                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9832                         return 0;
9833                 }
9834         }
9835         return -ENODEV;
9836 }
9837
9838 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9839 {
9840         struct net_device *dev = tp->dev;
9841
9842         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9843         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9844         return 0;
9845 }
9846 #endif
9847
9848 static int __devinit tg3_get_device_address(struct tg3 *tp)
9849 {
9850         struct net_device *dev = tp->dev;
9851         u32 hi, lo, mac_offset;
9852
9853 #ifdef CONFIG_SPARC64
9854         if (!tg3_get_macaddr_sparc(tp))
9855                 return 0;
9856 #endif
9857
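             /* The MAC address is normally stored in NVRAM at offset 0x7c;
              * the second port of a dual-MAC (5704/5780-class) device keeps
              * its copy at offset 0xcc instead.
              */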
9858         mac_offset = 0x7c;
9859         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9860              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9861             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9862                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9863                         mac_offset = 0xcc;
9864                 if (tg3_nvram_lock(tp))
9865                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9866                 else
9867                         tg3_nvram_unlock(tp);
9868         }
9869
9870         /* First try to get it from MAC address mailbox. */
9871         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
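             /* A valid mailbox entry carries the ASCII signature "HK"
              * (0x48 'H', 0x4b 'K') in the upper 16 bits, presumably
              * placed there by the bootcode.
              */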
9872         if ((hi >> 16) == 0x484b) {
9873                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9874                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9875
9876                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9877                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9878                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9879                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9880                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9881         }
9882         /* Next, try NVRAM. */
9883         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9884                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9885                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9886                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9887                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9888                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9889                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9890                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9891                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9892         }
9893         /* Finally just fetch it out of the MAC control regs. */
9894         else {
9895                 hi = tr32(MAC_ADDR_0_HIGH);
9896                 lo = tr32(MAC_ADDR_0_LOW);
9897
9898                 dev->dev_addr[5] = lo & 0xff;
9899                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9900                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9901                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9902                 dev->dev_addr[1] = hi & 0xff;
9903                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9904         }
9905
9906         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9907 #ifdef CONFIG_SPARC64
9908                 if (!tg3_get_default_macaddr_sparc(tp))
9909                         return 0;
9910 #endif
9911                 return -EINVAL;
9912         }
9913         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9914         return 0;
9915 }
9916
9917 #define BOUNDARY_SINGLE_CACHELINE       1
9918 #define BOUNDARY_MULTI_CACHELINE        2
9919
9920 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9921 {
9922         int cacheline_size;
9923         u8 byte;
9924         int goal;
9925
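             /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of
              * zero means the register was never programmed, in which case
              * 1024 bytes is assumed.
              */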
9926         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9927         if (byte == 0)
9928                 cacheline_size = 1024;
9929         else
9930                 cacheline_size = (int) byte * 4;
9931
9932         /* On 5703 and later chips, the boundary bits have no
9933          * effect.
9934          */
9935         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9936             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9937             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9938                 goto out;
9939
9940 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9941         goal = BOUNDARY_MULTI_CACHELINE;
9942 #else
9943 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9944         goal = BOUNDARY_SINGLE_CACHELINE;
9945 #else
9946         goal = 0;
9947 #endif
9948 #endif
9949
9950         if (!goal)
9951                 goto out;
9952
9953         /* PCI controllers on most RISC systems tend to disconnect
9954          * when a device tries to burst across a cache-line boundary.
9955          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9956          *
9957          * Unfortunately, for PCI-E there are only limited
9958          * write-side controls for this, and thus for reads
9959          * we will still get the disconnects.  We'll also waste
9960          * these PCI cycles for both read and write for chips
9961          * other than 5700 and 5701 which do not implement the
9962          * boundary bits.
9963          */
9964         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9965             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9966                 switch (cacheline_size) {
9967                 case 16:
9968                 case 32:
9969                 case 64:
9970                 case 128:
9971                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9972                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9973                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9974                         } else {
9975                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9976                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9977                         }
9978                         break;
9979
9980                 case 256:
9981                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9982                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9983                         break;
9984
9985                 default:
9986                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9987                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9988                         break;
9989                 }
9990         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9991                 switch (cacheline_size) {
9992                 case 16:
9993                 case 32:
9994                 case 64:
9995                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9996                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9997                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9998                                 break;
9999                         }
10000                         /* fallthrough */
10001                 case 128:
10002                 default:
10003                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10004                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10005                         break;
10006                 }
10007         } else {
10008                 switch (cacheline_size) {
10009                 case 16:
10010                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10011                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10012                                         DMA_RWCTRL_WRITE_BNDRY_16);
10013                                 break;
10014                         }
10015                         /* fallthrough */
10016                 case 32:
10017                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10018                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10019                                         DMA_RWCTRL_WRITE_BNDRY_32);
10020                                 break;
10021                         }
10022                         /* fallthrough */
10023                 case 64:
10024                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10025                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10026                                         DMA_RWCTRL_WRITE_BNDRY_64);
10027                                 break;
10028                         }
10029                         /* fallthrough */
10030                 case 128:
10031                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10032                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10033                                         DMA_RWCTRL_WRITE_BNDRY_128);
10034                                 break;
10035                         }
10036                         /* fallthrough */
10037                 case 256:
10038                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10039                                 DMA_RWCTRL_WRITE_BNDRY_256);
10040                         break;
10041                 case 512:
10042                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10043                                 DMA_RWCTRL_WRITE_BNDRY_512);
10044                         break;
10045                 case 1024:
10046                 default:
10047                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10048                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10049                         break;
10050                 }
10051         }
10052
10053 out:
10054         return val;
10055 }
10056
10057 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10058 {
10059         struct tg3_internal_buffer_desc test_desc;
10060         u32 sram_dma_descs;
10061         int i, ret;
10062
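              /* Hand-build a single internal buffer descriptor in NIC SRAM and
               * use it to drive the read-DMA (host-to-device) or write-DMA
               * (device-to-host) engine across 'size' bytes of the supplied
               * buffer, then poll for completion.
               */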
10063         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10064
10065         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10066         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10067         tw32(RDMAC_STATUS, 0);
10068         tw32(WDMAC_STATUS, 0);
10069
10070         tw32(BUFMGR_MODE, 0);
10071         tw32(FTQ_RESET, 0);
10072
10073         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10074         test_desc.addr_lo = buf_dma & 0xffffffff;
10075         test_desc.nic_mbuf = 0x00002100;
10076         test_desc.len = size;
10077
10078         /*
10079          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10080          * the *second* time the tg3 driver was getting loaded after an
10081          * initial scan.
10082          *
10083          * Broadcom tells me:
10084          *   ...the DMA engine is connected to the GRC block and a DMA
10085          *   reset may affect the GRC block in some unpredictable way...
10086          *   The behavior of resets to individual blocks has not been tested.
10087          *
10088          * Broadcom noted the GRC reset will also reset all sub-components.
10089          */
10090         if (to_device) {
10091                 test_desc.cqid_sqid = (13 << 8) | 2;
10092
10093                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10094                 udelay(40);
10095         } else {
10096                 test_desc.cqid_sqid = (16 << 8) | 7;
10097
10098                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10099                 udelay(40);
10100         }
10101         test_desc.flags = 0x00000005;
10102
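              /* Copy the descriptor into NIC SRAM one 32-bit word at a time,
               * going through the PCI memory-window base/data config registers
               * rather than the MMIO aperture.
               */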
10103         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10104                 u32 val;
10105
10106                 val = *(((u32 *)&test_desc) + i);
10107                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10108                                        sram_dma_descs + (i * sizeof(u32)));
10109                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10110         }
10111         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10112
10113         if (to_device) {
10114                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10115         } else {
10116                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10117         }
10118
10119         ret = -ENODEV;
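              /* Poll the completion FIFO for up to 40 * 100us = 4ms, waiting
               * for its low 16 bits to echo the SRAM address of our descriptor.
               */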
10120         for (i = 0; i < 40; i++) {
10121                 u32 val;
10122
10123                 if (to_device)
10124                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10125                 else
10126                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10127                 if ((val & 0xffff) == sram_dma_descs) {
10128                         ret = 0;
10129                         break;
10130                 }
10131
10132                 udelay(100);
10133         }
10134
10135         return ret;
10136 }
10137
10138 #define TEST_BUFFER_SIZE        0x2000
10139
10140 static int __devinit tg3_test_dma(struct tg3 *tp)
10141 {
10142         dma_addr_t buf_dma;
10143         u32 *buf, saved_dma_rwctrl;
10144         int ret;
10145
10146         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10147         if (!buf) {
10148                 ret = -ENOMEM;
10149                 goto out_nofree;
10150         }
10151
10152         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10153                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10154
10155         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10156
10157         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10158                 /* DMA read watermark not used on PCIE */
10159                 tp->dma_rwctrl |= 0x00180000;
10160         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10162                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10163                         tp->dma_rwctrl |= 0x003f0000;
10164                 else
10165                         tp->dma_rwctrl |= 0x003f000f;
10166         } else {
10167                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10168                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10169                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10170
10171                         if (ccval == 0x6 || ccval == 0x7)
10172                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10173
10174                         /* Set bit 23 to enable PCIX hw bug fix */
10175                         tp->dma_rwctrl |= 0x009f0000;
10176                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10177                         /* 5780 always in PCIX mode */
10178                         tp->dma_rwctrl |= 0x00144000;
10179                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10180                         /* 5714 always in PCIX mode */
10181                         tp->dma_rwctrl |= 0x00148000;
10182                 } else {
10183                         tp->dma_rwctrl |= 0x001b000f;
10184                 }
10185         }
10186
10187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10188             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10189                 tp->dma_rwctrl &= 0xfffffff0;
10190
10191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10192             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10193                 /* Remove this if it causes problems for some boards. */
10194                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10195
10196                 /* On 5700/5701 chips, we need to set this bit.
10197                  * Otherwise the chip will issue cacheline transactions
10198                  * to streamable DMA memory with not all the byte
10199                  * enables turned on.  This is an error on several
10200                  * RISC PCI controllers, in particular sparc64.
10201                  *
10202                  * On 5703/5704 chips, this bit has been reassigned
10203                  * a different meaning.  In particular, it is used
10204                  * on those chips to enable a PCI-X workaround.
10205                  */
10206                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10207         }
10208
10209         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10210
10211 #if 0
10212         /* Unneeded, already done by tg3_get_invariants.  */
10213         tg3_switch_clocks(tp);
10214 #endif
10215
10216         ret = 0;
10217         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10218             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10219                 goto out;
10220
10221         /* It is best to perform DMA test with maximum write burst size
10222          * to expose the 5700/5701 write DMA bug.
10223          */
10224         saved_dma_rwctrl = tp->dma_rwctrl;
10225         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10226         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10227
10228         while (1) {
10229                 u32 *p = buf, i;
10230
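                      /* Fill the buffer with an ascending word pattern so any
                       * corruption after the round trip to the card is easy to
                       * spot when it is read back and verified below.
                       */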
10231                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10232                         p[i] = i;
10233
10234                 /* Send the buffer to the chip. */
10235                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10236                 if (ret) {
10237                         printk(KERN_ERR "tg3_test_dma() buffer write failed, err %d\n", ret);
10238                         break;
10239                 }
10240
10241 #if 0
10242                 /* validate data reached card RAM correctly. */
10243                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10244                         u32 val;
10245                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10246                         if (le32_to_cpu(val) != p[i]) {
10247                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10248                                 /* ret = -ENODEV here? */
10249                         }
10250                         p[i] = 0;
10251                 }
10252 #endif
10253                 /* Now read it back. */
10254                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10255                 if (ret) {
10256                         printk(KERN_ERR "tg3_test_dma() buffer read failed, err %d\n", ret);
10257
10258                         break;
10259                 }
10260
10261                 /* Verify it. */
10262                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10263                         if (p[i] == i)
10264                                 continue;
10265
10266                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10267                             DMA_RWCTRL_WRITE_BNDRY_16) {
10268                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10269                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10270                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10271                                 break;
10272                         } else {
10273                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10274                                 ret = -ENODEV;
10275                                 goto out;
10276                         }
10277                 }
10278
10279                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10280                         /* Success. */
10281                         ret = 0;
10282                         break;
10283                 }
10284         }
10285         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10286             DMA_RWCTRL_WRITE_BNDRY_16) {
10287                 static struct pci_device_id dma_wait_state_chipsets[] = {
10288                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10289                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10290                         { },
10291                 };
10292
10293                 /* DMA test passed without adjusting DMA boundary,
10294                  * now look for chipsets that are known to expose the
10295                  * DMA bug without failing the test.
10296                  */
10297                 if (pci_dev_present(dma_wait_state_chipsets)) {
10298                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10299                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10300                 }
10301                 else
10302                         /* Safe to use the calculated DMA boundary. */
10303                         tp->dma_rwctrl = saved_dma_rwctrl;
10304
10305                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10306         }
10307
10308 out:
10309         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10310 out_nofree:
10311         return ret;
10312 }
10313
10314 static void __devinit tg3_init_link_config(struct tg3 *tp)
10315 {
10316         tp->link_config.advertising =
10317                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10318                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10319                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10320                  ADVERTISED_Autoneg | ADVERTISED_MII);
10321         tp->link_config.speed = SPEED_INVALID;
10322         tp->link_config.duplex = DUPLEX_INVALID;
10323         tp->link_config.autoneg = AUTONEG_ENABLE;
10324         netif_carrier_off(tp->dev);
10325         tp->link_config.active_speed = SPEED_INVALID;
10326         tp->link_config.active_duplex = DUPLEX_INVALID;
10327         tp->link_config.phy_is_low_power = 0;
10328         tp->link_config.orig_speed = SPEED_INVALID;
10329         tp->link_config.orig_duplex = DUPLEX_INVALID;
10330         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10331 }
10332
10333 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10334 {
10335         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10336                 tp->bufmgr_config.mbuf_read_dma_low_water =
10337                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10338                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10339                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10340                 tp->bufmgr_config.mbuf_high_water =
10341                         DEFAULT_MB_HIGH_WATER_5705;
10342
10343                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10344                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10345                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10346                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10347                 tp->bufmgr_config.mbuf_high_water_jumbo =
10348                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10349         } else {
10350                 tp->bufmgr_config.mbuf_read_dma_low_water =
10351                         DEFAULT_MB_RDMA_LOW_WATER;
10352                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10353                         DEFAULT_MB_MACRX_LOW_WATER;
10354                 tp->bufmgr_config.mbuf_high_water =
10355                         DEFAULT_MB_HIGH_WATER;
10356
10357                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10358                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10359                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10360                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10361                 tp->bufmgr_config.mbuf_high_water_jumbo =
10362                         DEFAULT_MB_HIGH_WATER_JUMBO;
10363         }
10364
10365         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10366         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10367 }
10368
10369 static char * __devinit tg3_phy_string(struct tg3 *tp)
10370 {
10371         switch (tp->phy_id & PHY_ID_MASK) {
10372         case PHY_ID_BCM5400:    return "5400";
10373         case PHY_ID_BCM5401:    return "5401";
10374         case PHY_ID_BCM5411:    return "5411";
10375         case PHY_ID_BCM5701:    return "5701";
10376         case PHY_ID_BCM5703:    return "5703";
10377         case PHY_ID_BCM5704:    return "5704";
10378         case PHY_ID_BCM5705:    return "5705";
10379         case PHY_ID_BCM5750:    return "5750";
10380         case PHY_ID_BCM5752:    return "5752";
10381         case PHY_ID_BCM5714:    return "5714";
10382         case PHY_ID_BCM5780:    return "5780";
10383         case PHY_ID_BCM8002:    return "8002/serdes";
10384         case 0:                 return "serdes";
10385         default:                return "unknown";
10386         }
10387 }
10388
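/* Build a human-readable bus description for the probe banner:
 * PCI Express, PCI-X (with the clock decoded from the low bits of
 * TG3PCI_CLOCK_CTRL), or plain PCI, plus the 32/64-bit bus width.
 */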
10389 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10390 {
10391         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10392                 strcpy(str, "PCI Express");
10393                 return str;
10394         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10395                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10396
10397                 strcpy(str, "PCIX:");
10398
10399                 if ((clock_ctrl == 7) ||
10400                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10401                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10402                         strcat(str, "133MHz");
10403                 else if (clock_ctrl == 0)
10404                         strcat(str, "33MHz");
10405                 else if (clock_ctrl == 2)
10406                         strcat(str, "50MHz");
10407                 else if (clock_ctrl == 4)
10408                         strcat(str, "66MHz");
10409                 else if (clock_ctrl == 6)
10410                         strcat(str, "100MHz");
10413         } else {
10414                 strcpy(str, "PCI:");
10415                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10416                         strcat(str, "66MHz");
10417                 else
10418                         strcat(str, "33MHz");
10419         }
10420         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10421                 strcat(str, ":32-bit");
10422         else
10423                 strcat(str, ":64-bit");
10424         return str;
10425 }
10426
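/* The 5704 is a dual-MAC device; the two ports appear as separate
 * PCI functions in the same slot.  Walk the other functions of our
 * slot to locate the peer's pci_dev.
 */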
10427 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10428 {
10429         struct pci_dev *peer;
10430         unsigned int func, devnr = tp->pdev->devfn & ~7;
10431
10432         for (func = 0; func < 8; func++) {
10433                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10434                 if (peer && peer != tp->pdev)
10435                         break;
10436                 pci_dev_put(peer);
10437         }
10438         BUG_ON(!peer || peer == tp->pdev);
10440
10441         /*
10442          * We don't need to keep the refcount elevated; there's no way
10443          * to remove one half of this device without removing the other
10444          */
10445         pci_dev_put(peer);
10446
10447         return peer;
10448 }
10449
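/* Seed the ethtool coalescing parameters with the driver defaults.
 * Chips using the CLRTICK host-coalescing modes get the matching
 * tick values, and the per-interrupt and statistics-block settings
 * are cleared on 5705-and-newer parts.
 */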
10450 static void __devinit tg3_init_coal(struct tg3 *tp)
10451 {
10452         struct ethtool_coalesce *ec = &tp->coal;
10453
10454         memset(ec, 0, sizeof(*ec));
10455         ec->cmd = ETHTOOL_GCOALESCE;
10456         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10457         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10458         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10459         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10460         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10461         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10462         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10463         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10464         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10465
10466         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10467                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10468                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10469                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10470                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10471                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10472         }
10473
10474         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10475                 ec->rx_coalesce_usecs_irq = 0;
10476                 ec->tx_coalesce_usecs_irq = 0;
10477                 ec->stats_block_coalesce_usecs = 0;
10478         }
10479 }
10480
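/* Probe one Tigon3 device: enable and map its PCI resources, pick a
 * DMA mask, allocate the net_device, read the chip invariants and MAC
 * address, run the DMA engine test and finally register the netdev.
 * Failures unwind through the err_out_* labels below.
 */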
10481 static int __devinit tg3_init_one(struct pci_dev *pdev,
10482                                   const struct pci_device_id *ent)
10483 {
10484         static int tg3_version_printed;
10485         unsigned long tg3reg_base, tg3reg_len;
10486         struct net_device *dev;
10487         struct tg3 *tp;
10488         int i, err, pci_using_dac, pm_cap;
10489         char str[40];
10490
10491         if (tg3_version_printed++ == 0)
10492                 printk(KERN_INFO "%s", version);
10493
10494         err = pci_enable_device(pdev);
10495         if (err) {
10496                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10497                        "aborting.\n");
10498                 return err;
10499         }
10500
10501         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10502                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10503                        "base address, aborting.\n");
10504                 err = -ENODEV;
10505                 goto err_out_disable_pdev;
10506         }
10507
10508         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10509         if (err) {
10510                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10511                        "aborting.\n");
10512                 goto err_out_disable_pdev;
10513         }
10514
10515         pci_set_master(pdev);
10516
10517         /* Find power-management capability. */
10518         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10519         if (pm_cap == 0) {
10520                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10521                        "aborting.\n");
10522                 err = -EIO;
10523                 goto err_out_free_res;
10524         }
10525
10526         /* Configure DMA attributes. */
10527         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10528         if (!err) {
10529                 pci_using_dac = 1;
10530                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10531                 if (err < 0) {
10532                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10533                                "for consistent allocations, aborting.\n");
10534                         goto err_out_free_res;
10535                 }
10536         } else {
10537                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10538                 if (err) {
10539                         printk(KERN_ERR PFX "No usable DMA configuration, "
10540                                "aborting.\n");
10541                         goto err_out_free_res;
10542                 }
10543                 pci_using_dac = 0;
10544         }
10545
10546         tg3reg_base = pci_resource_start(pdev, 0);
10547         tg3reg_len = pci_resource_len(pdev, 0);
10548
10549         dev = alloc_etherdev(sizeof(*tp));
10550         if (!dev) {
10551                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10552                 err = -ENOMEM;
10553                 goto err_out_free_res;
10554         }
10555
10556         SET_MODULE_OWNER(dev);
10557         SET_NETDEV_DEV(dev, &pdev->dev);
10558
10559         if (pci_using_dac)
10560                 dev->features |= NETIF_F_HIGHDMA;
10561         dev->features |= NETIF_F_LLTX;
10562 #if TG3_VLAN_TAG_USED
10563         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10564         dev->vlan_rx_register = tg3_vlan_rx_register;
10565         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10566 #endif
10567
10568         tp = netdev_priv(dev);
10569         tp->pdev = pdev;
10570         tp->dev = dev;
10571         tp->pm_cap = pm_cap;
10572         tp->mac_mode = TG3_DEF_MAC_MODE;
10573         tp->rx_mode = TG3_DEF_RX_MODE;
10574         tp->tx_mode = TG3_DEF_TX_MODE;
10575         tp->mi_mode = MAC_MI_MODE_BASE;
10576         if (tg3_debug > 0)
10577                 tp->msg_enable = tg3_debug;
10578         else
10579                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10580
10581         /* The word/byte swap controls here control register access byte
10582          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10583          * setting below.
10584          */
10585         tp->misc_host_ctrl =
10586                 MISC_HOST_CTRL_MASK_PCI_INT |
10587                 MISC_HOST_CTRL_WORD_SWAP |
10588                 MISC_HOST_CTRL_INDIR_ACCESS |
10589                 MISC_HOST_CTRL_PCISTATE_RW;
10590
10591         /* The NONFRM (non-frame) byte/word swap controls take effect
10592          * on descriptor entries, anything which isn't packet data.
10593          *
10594          * The StrongARM chips on the board (one for tx, one for rx)
10595          * are running in big-endian mode.
10596          */
10597         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10598                         GRC_MODE_WSWAP_NONFRM_DATA);
10599 #ifdef __BIG_ENDIAN
10600         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10601 #endif
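        /* Initialize the driver locks (main lock, TX path lock, and the
         * lock serializing indirect register accesses) and the deferred
         * chip-reset work.
         */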
10602         spin_lock_init(&tp->lock);
10603         spin_lock_init(&tp->tx_lock);
10604         spin_lock_init(&tp->indirect_lock);
10605         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10606
10607         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10608         if (!tp->regs) {
10609                 printk(KERN_ERR PFX "Cannot map device registers, "
10610                        "aborting.\n");
10611                 err = -ENOMEM;
10612                 goto err_out_free_dev;
10613         }
10614
10615         tg3_init_link_config(tp);
10616
10617         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10618         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10619         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10620
10621         dev->open = tg3_open;
10622         dev->stop = tg3_close;
10623         dev->get_stats = tg3_get_stats;
10624         dev->set_multicast_list = tg3_set_rx_mode;
10625         dev->set_mac_address = tg3_set_mac_addr;
10626         dev->do_ioctl = tg3_ioctl;
10627         dev->tx_timeout = tg3_tx_timeout;
10628         dev->poll = tg3_poll;
10629         dev->ethtool_ops = &tg3_ethtool_ops;
10630         dev->weight = 64;
10631         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10632         dev->change_mtu = tg3_change_mtu;
10633         dev->irq = pdev->irq;
10634 #ifdef CONFIG_NET_POLL_CONTROLLER
10635         dev->poll_controller = tg3_poll_controller;
10636 #endif
10637
10638         err = tg3_get_invariants(tp);
10639         if (err) {
10640                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10641                        "aborting.\n");
10642                 goto err_out_iounmap;
10643         }
10644
10645         tg3_init_bufmgr_config(tp);
10646
10647 #if TG3_TSO_SUPPORT != 0
10648         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10649                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10650         }
10651         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10652             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10653             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10654             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10655                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10656         } else {
10657                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10658         }
10659
10660         /* TSO is off by default; the user can enable it at run time via ethtool.  */
10661 #if 0
10662         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10663                 dev->features |= NETIF_F_TSO;
10664 #endif
10665
10666 #endif
10667
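        /* 5705 A1 parts without TSO on a slow bus are limited to 64
         * pending RX buffers (TG3_FLG2_MAX_RXPEND_64); cap the default
         * RX ring size accordingly.
         */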
10668         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10669             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10670             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10671                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10672                 tp->rx_pending = 63;
10673         }
10674
10675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10676                 tp->pdev_peer = tg3_find_5704_peer(tp);
10677
10678         err = tg3_get_device_address(tp);
10679         if (err) {
10680                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10681                        "aborting.\n");
10682                 goto err_out_iounmap;
10683         }
10684
10685         /*
10686          * Reset the chip in case an UNDI or EFI boot driver did not shut
10687          * it down cleanly.  Otherwise the DMA self test below would enable
10688          * WDMAC and we would see (spurious) pending DMA on the PCI bus.
10689          */
10690         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10691             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10692                 pci_save_state(tp->pdev);
10693                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10694                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10695         }
10696
10697         err = tg3_test_dma(tp);
10698         if (err) {
10699                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10700                 goto err_out_iounmap;
10701         }
10702
10703         /* The Tigon3 can only checksum IPv4 packets, and some chips
10704          * have buggy checksum hardware.
10705          */
10706         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10707                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10708                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10709         } else
10710                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10711
10712         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10713                 dev->features &= ~NETIF_F_HIGHDMA;
10714
10715         /* Flow control autonegotiation is the default behavior. */
10716         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10717
10718         tg3_init_coal(tp);
10719
10720         /* Now that we have fully set up the chip, save away a snapshot
10721          * of the PCI config space.  We need to restore this after
10722          * GRC_MISC_CFG core clock resets and some resume events.
10723          */
10724         pci_save_state(tp->pdev);
10725
10726         err = register_netdev(dev);
10727         if (err) {
10728                 printk(KERN_ERR PFX "Cannot register net device, "
10729                        "aborting.\n");
10730                 goto err_out_iounmap;
10731         }
10732
10733         pci_set_drvdata(pdev, dev);
10734
10735         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10736                dev->name,
10737                tp->board_part_number,
10738                tp->pci_chip_rev_id,
10739                tg3_phy_string(tp),
10740                tg3_bus_string(tp, str),
10741                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10742
10743         for (i = 0; i < 6; i++)
10744                 printk("%2.2x%c", dev->dev_addr[i],
10745                        i == 5 ? '\n' : ':');
10746
10747         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10748                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10749                "TSOcap[%d]\n",
10750                dev->name,
10751                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10752                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10753                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10754                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10755                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10756                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10757                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10758         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10759                dev->name, tp->dma_rwctrl);
10760
10761         return 0;
10762
10763 err_out_iounmap:
10764         if (tp->regs) {
10765                 iounmap(tp->regs);
10766                 tp->regs = NULL;
10767         }
10768
10769 err_out_free_dev:
10770         free_netdev(dev);
10771
10772 err_out_free_res:
10773         pci_release_regions(pdev);
10774
10775 err_out_disable_pdev:
10776         pci_disable_device(pdev);
10777         pci_set_drvdata(pdev, NULL);
10778         return err;
10779 }
10780
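/* Tear down in roughly the reverse order of tg3_init_one(): unregister
 * the netdev, unmap the registers, free the net_device, and release
 * the PCI resources.
 */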
10781 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10782 {
10783         struct net_device *dev = pci_get_drvdata(pdev);
10784
10785         if (dev) {
10786                 struct tg3 *tp = netdev_priv(dev);
10787
10788                 unregister_netdev(dev);
10789                 if (tp->regs) {
10790                         iounmap(tp->regs);
10791                         tp->regs = NULL;
10792                 }
10793                 free_netdev(dev);
10794                 pci_release_regions(pdev);
10795                 pci_disable_device(pdev);
10796                 pci_set_drvdata(pdev, NULL);
10797         }
10798 }
10799
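/* PCI power management hooks.  Suspend quiesces the interface (stop
 * the RX/TX paths, kill the timer, disable interrupts, halt the chip)
 * before entering the requested low-power state; resume restores PCI
 * state, powers the chip back up and reinitializes the hardware.  If
 * the power-state change fails on suspend, the device is brought back
 * up so it stays usable.
 */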
10800 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10801 {
10802         struct net_device *dev = pci_get_drvdata(pdev);
10803         struct tg3 *tp = netdev_priv(dev);
10804         int err;
10805
10806         if (!netif_running(dev))
10807                 return 0;
10808
10809         tg3_netif_stop(tp);
10810
10811         del_timer_sync(&tp->timer);
10812
10813         tg3_full_lock(tp, 1);
10814         tg3_disable_ints(tp);
10815         tg3_full_unlock(tp);
10816
10817         netif_device_detach(dev);
10818
10819         tg3_full_lock(tp, 0);
10820         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10821         tg3_full_unlock(tp);
10822
10823         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10824         if (err) {
10825                 tg3_full_lock(tp, 0);
10826
10827                 tg3_init_hw(tp);
10828
10829                 tp->timer.expires = jiffies + tp->timer_offset;
10830                 add_timer(&tp->timer);
10831
10832                 netif_device_attach(dev);
10833                 tg3_netif_start(tp);
10834
10835                 tg3_full_unlock(tp);
10836         }
10837
10838         return err;
10839 }
10840
10841 static int tg3_resume(struct pci_dev *pdev)
10842 {
10843         struct net_device *dev = pci_get_drvdata(pdev);
10844         struct tg3 *tp = netdev_priv(dev);
10845         int err;
10846
10847         if (!netif_running(dev))
10848                 return 0;
10849
10850         pci_restore_state(tp->pdev);
10851
10852         err = tg3_set_power_state(tp, 0);
10853         if (err)
10854                 return err;
10855
10856         netif_device_attach(dev);
10857
10858         tg3_full_lock(tp, 0);
10859
10860         tg3_init_hw(tp);
10861
10862         tp->timer.expires = jiffies + tp->timer_offset;
10863         add_timer(&tp->timer);
10864
10865         tg3_netif_start(tp);
10866
10867         tg3_full_unlock(tp);
10868
10869         return 0;
10870 }
10871
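/* Hook our probe/remove and suspend/resume handlers into the PCI core. */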
10872 static struct pci_driver tg3_driver = {
10873         .name           = DRV_MODULE_NAME,
10874         .id_table       = tg3_pci_tbl,
10875         .probe          = tg3_init_one,
10876         .remove         = __devexit_p(tg3_remove_one),
10877         .suspend        = tg3_suspend,
10878         .resume         = tg3_resume
10879 };
10880
10881 static int __init tg3_init(void)
10882 {
10883         return pci_module_init(&tg3_driver);
10884 }
10885
10886 static void __exit tg3_cleanup(void)
10887 {
10888         pci_unregister_driver(&tg3_driver);
10889 }
10890
10891 module_init(tg3_init);
10892 module_exit(tg3_cleanup);